diff --git a/.github/codecov.yml b/.github/codecov.yml
index 072d561668..cacd8f8180 100644
--- a/.github/codecov.yml
+++ b/.github/codecov.yml
@@ -1,42 +1,67 @@
 # Source Network's codecov configuration file.
+
 github_checks:
-  annotations: true
+  annotations: true # This won't work if patch is `false` or has flags.
+
 codecov:
-  require_ci_to_pass: yes
+  require_ci_to_pass: false
+  allow_pseudo_compare: true
+  allow_coverage_offsets: true
+
 coverage:
   precision: 2
-  round: down
-  range: 65...100
+  round: "nearest"
+  range: 60...90

   status:
-    # Learn more at https://docs.codecov.io/docs/commit-status
+
     project:
       default:
-        target: 50%
-        threshold: 0.05 # allow this much decrease on project
-        flags:
-          - unit
-
-    # Disable patch as it is not correct and buggy.
-    # Folks over at amazon's aws and mozilla's firefox tv also did the same LOL:
-    # - https://github.com/aws/amazon-vpc-cni-k8s/pull/1226/files
-    # - https://github.com/mozilla-mobile/firefox-tv/pull/779/files
+        only_pulls: true # Only post the status if the commits are on a pull request.
+        informational: true # Don't fail the codecov action because of the project's coverage.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+    # Note: Patch is needed for github annotations.
     patch:
       default:
-        enabled: no
-        if_not_found: success
+        informational: true # Don't fail the codecov action because of the patch's coverage.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+    # Detect indirect coverage changes.
+    changes:
+      default:
+        informational: true # Don't fail the codecov action because of indirect coverage changes.
+        if_ci_failed: "error" # Give an error if CI fails (e.g. upload to codecov failed).
+        if_not_found: "failure" # Fail if no report for HEAD is found.
+
+
+parsers:
+  go:
+    partials_as_hits: false # Don't treat partials as hits.

-    changes: false
 comment:
-  layout: "reach, diff, files"
-  behavior: default # update if exists else create new
-  require_changes: true
+  # First the reach graph, then the diff, then the file changes.
+  layout: "newheader, reach, diff, flags, files, footer"
+
+  # Update the existing comment with new results.
+  behavior: "default"
+
+  # Post the comment even if there were no changes.
+  require_changes: false
+
+  # Post the comment even if no head or base is found.
+  require_head: false
+  require_base: false
+
 ignore:
   - "tests"
+  - "**/mocks/*"
   - "**/*_test.go"
   - "**/*.pb.go"
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index e277c8c8bc..e48d4303f1 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -8,3 +8,11 @@ updates:
       - "dependencies"
     commit-message:
       prefix: "bot"
+  - package-ecosystem: "npm"
+    directory: "/playground"
+    schedule:
+      interval: "weekly"
+    labels:
+      - "dependencies"
+    commit-message:
+      prefix: "bot"
\ No newline at end of file
diff --git a/.github/workflows/build-ami-with-packer.yml b/.github/workflows/build-ami-with-packer.yml
deleted file mode 100644
index ee5392338b..0000000000
--- a/.github/workflows/build-ami-with-packer.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2023 Democratized Data Foundation
-#
-# Use of this software is governed by the Business Source License
-# included in the file licenses/BSL.txt.
-#
-# As of the Change Date specified in that file, in accordance with
-# the Business Source License, use of this software will be governed
-# by the Apache License, Version 2.0, included in the file
-# licenses/APL.txt.
-
-name: Build AMI With Packer Workflow
-
-on:
-  push:
-    tags: ["v[0-9].[0-9]+.[0-9]+"]
-
-env:
-  PACKER_LOG: 1
-  # RELEASE_VERSION: v0.5.0
-
-jobs:
-  build-ami-with-packer:
-    name: Build ami with packer job
-
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code into the directory
-        uses: actions/checkout@v3
-
-
-      - name: Environment version target
-        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
-        # run: echo ${{ env.RELEASE_VERSION }}
-
-      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v1
-        with:
-          aws-access-key-id: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
-          aws-region: us-east-1
-
-      - name: Setup `packer`
-        uses: hashicorp/setup-packer@main
-        id: setup
-        with:
-          version: "latest"
-
-      - name: Run `packer init`
-        id: init
-        run: "packer init ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
-
-      - name: Run `packer validate`
-        id: validate
-        run: "packer validate -var \"commit=${{ env.RELEASE_VERSION }}\" ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
-
-      - name: Run `packer build`
-        id: build
-        run: "packer build -var \"commit=${{ env.RELEASE_VERSION }}\" ./tools/cloud/aws/packer/build_aws_ami.pkr.hcl"
diff --git a/.github/workflows/build-dependencies.yml b/.github/workflows/build-dependencies.yml
index 4eeb238726..112f847192 100644
--- a/.github/workflows/build-dependencies.yml
+++ b/.github/workflows/build-dependencies.yml
@@ -12,10 +12,13 @@ name: Build Dependencies Workflow

 on:
   pull_request:
+    branches:
+      - master
+      - develop

   push:
     tags:
-      - v*
+      - 'v[0-9]+.[0-9]+.[0-9]+'
     branches:
       - master
       - develop
@@ -34,7 +37,7 @@ jobs:
       - name: Setup Go environment explicitly
         uses: actions/setup-go@v3
         with:
-          go-version: "1.19"
+          go-version: "1.20"
           check-latest: true

       - name: Build all dependencies
diff --git a/.github/workflows/build-then-deploy-ami.yml b/.github/workflows/build-then-deploy-ami.yml
new file mode 100644
index 0000000000..ce6be1e0bd
--- /dev/null
+++ b/.github/workflows/build-then-deploy-ami.yml
@@ -0,0 +1,116 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow builds the AMI using packer. If the build is successful,
+# it then deploys the AMI onto AWS using terraform apply.
+name: Build Then Deploy AMI Workflow
+
+on:
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+
+env:
+  AWS_REGION: 'us-east-1'
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }}
+
+  # Logging verbosities (has to be named `PACKER_LOG` and `TF_LOG`).
+  PACKER_LOG: 1
+  TF_LOG: INFO
+
+  # Directories containing config files for AWS AMI.
+  PACKER_DIR: 'tools/cloud/aws/packer'
+  TF_DIR: 'tools/cloud/aws/terraform'
+
+  # Set environment type for terraform: `dev`, `test`, `prod`
+  ENVIRONMENT_TYPE: "dev"
+
+  # RELEASE_VERSION: v0.5.0
+
+jobs:
+  # This job is responsible for building the AMI using packer.
+  build-ami-with-packer:
+    name: Build ami with packer job
+
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: ${{ env.PACKER_DIR }}
+
+    steps:
+      - name: Checkout code into the directory
+        uses: actions/checkout@v3
+
+      - name: Environment version target
+        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> ${GITHUB_ENV}
+        # run: echo ${{ env.RELEASE_VERSION }}
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v1
+        with:
+          aws-region: ${{ env.AWS_REGION }}
+          aws-access-key-id: ${{ env.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ env.AWS_SECRET_ACCESS_KEY }}
+
+      - name: Setup `packer`
+        uses: hashicorp/setup-packer@main
+        with:
+          version: "latest"
+
+      - name: Run `packer init`
+        run: "packer init build_aws_ami.pkr.hcl"
+
+      - name: Run `packer validate`
+        run: "packer validate -var \"commit=${{ env.RELEASE_VERSION }}\" build_aws_ami.pkr.hcl"
+
+      - name: Run `packer build`
+        run: "packer build -var \"commit=${{ env.RELEASE_VERSION }}\" build_aws_ami.pkr.hcl"
+
+  # This job is responsible for deploying the built AMI onto AWS, using terraform apply.
+  deploy-ami-with-terraform-apply:
+    name: Deploy ami with terraform apply job
+    needs:
+      - build-ami-with-packer
+
+    runs-on: ubuntu-latest
+
+    defaults:
+      run:
+        working-directory: ${{ env.TF_DIR }}
+
+    steps:
+      - name: Checkout code into the directory
+        uses: actions/checkout@v3
+
+      - name: Terraform action setup
+        uses: hashicorp/setup-terraform@v2
+        with:
+          terraform_version: 1.3.7
+
+      - name: Terraform format
+        run: terraform fmt -check
+
+      - name: Terraform initialization
+        run: terraform init -backend-config="workspaces/${ENVIRONMENT_TYPE}-backend.conf"
+
+      - name: Terraform workspace
+        # Select workspace if it exists, otherwise create a new workspace.
+        run: terraform workspace select ${ENVIRONMENT_TYPE} || terraform workspace new ${ENVIRONMENT_TYPE}
+
+      - name: Terraform validation
+        run: terraform validate -no-color
+
+      - name: List workspaces
+        run: ls workspaces
+
+      - name: Terraform Apply
+        run: terraform apply -auto-approve -input=false -var-file="workspaces/source-ec2-${ENVIRONMENT_TYPE}.tfvars"
diff --git a/.github/workflows/check-vulnerabilities.yml b/.github/workflows/check-vulnerabilities.yml
new file mode 100644
index 0000000000..18e5f60de8
--- /dev/null
+++ b/.github/workflows/check-vulnerabilities.yml
@@ -0,0 +1,39 @@
+# Copyright 2023 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+name: Check Vulnerabilities Workflow
+
+on:
+  pull_request:
+    branches:
+      - master
+      - develop
+
+  push:
+    tags:
+      - 'v[0-9]+.[0-9]+.[0-9]+'
+    branches:
+      - master
+      - develop
+
+jobs:
+  check-vulnerabilities:
+    name: Check vulnerabilities job
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Run govulncheck
+        uses: golang/govulncheck-action@v1
+        with:
+          go-version-input: "1.20"
+          go-package: ./...
+ check-latest: true + cache: true diff --git a/.github/workflows/code-test-coverage.yml b/.github/workflows/code-test-coverage.yml index bb1be36296..65c0a92f1f 100644 --- a/.github/workflows/code-test-coverage.yml +++ b/.github/workflows/code-test-coverage.yml @@ -12,9 +12,16 @@ name: Code Test Coverage Workflow on: pull_request: + branches: + - master + - develop push: - + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop jobs: code-test-coverage: @@ -25,25 +32,45 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v3 - with: - fetch-depth: 2 - - name: Setup Go + - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" check-latest: true - name: Generate full test coverage report using go-acc run: make test:coverage - - name: Upload coverage to Codecov + - name: Upload coverage to Codecov without token, retry on failure + env: + codecov_secret: ${{ secrets.CODECOV_TOKEN }} + if: env.codecov_secret == '' + uses: Wandalen/wretry.action@v1.0.36 + with: + attempt_limit: 5 + attempt_delay: 10000 + action: codecov/codecov-action@v3 + with: | + name: defradb-codecov + files: ./coverage.txt + flags: all-tests + os: 'linux' + fail_ci_if_error: true + verbose: true + + - name: Upload coverage to Codecov with token + env: + codecov_secret: ${{ secrets.CODECOV_TOKEN }} + if: env.codecov_secret != '' uses: codecov/codecov-action@v3 with: - fail_ci_if_error: true + token: ${{ env.codecov_secret }} + name: defradb-codecov files: ./coverage.txt - flags: defra-tests - name: codecov-umbrella + flags: all-tests + os: 'linux' + fail_ci_if_error: true verbose: true # path_to_write_report: ./coverage/codecov_report.txt # directory: ./coverage/reports/ diff --git a/.github/workflows/deploy-ami-with-terraform.yml b/.github/workflows/deploy-ami-with-terraform.yml deleted file mode 100644 index 3ec9d074ab..0000000000 --- a/.github/workflows/deploy-ami-with-terraform.yml +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2023 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Deploy AMI With Terraform Workflow - -env: - # Verbosity setting for Terraform logs - TF_LOG: INFO - - # Credentials for deployment to AWS. 
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }} - - # Set environment type: dev, test, prod - ENVIRONMENT: "dev" - -on: - workflow_run: - workflows: ["Build AMI With Packer Workflow"] - types: - - completed - - pull_request: - -jobs: - deploy-ami-with-terraform: - name: Deploy ami with terraform job - - if: ${{ github.event.workflow_run.conclusion == 'success' }} - - runs-on: ubuntu-latest - - defaults: - run: - working-directory: tools/cloud/aws/terraform - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Terraform action setup - uses: hashicorp/setup-terraform@v2 - with: - terraform_version: 1.3.7 - - - name: Terraform format - id: fmt - run: terraform fmt -check - - - name: Terraform initialization - id: init - run: terraform init -backend-config="workspaces/$ENVIRONMENT-backend.conf" - - - name: Terraform workspace - id: wrokspace - run: terraform workspace select $ENVIRONMENT || terraform workspace new $ENVIRONMENT #Create workspace if it doesnt exist - - - name: Terraform validate - id: validate - run: terraform validate -no-color - - - name: Terraform plan - id: plan - if: github.event_name == 'pull_request' - run: terraform plan -no-color -input=false -var-file="workspaces/source-ec2-$ENVIRONMENT.tfvars" - continue-on-error: true - - - name: Update pull request - uses: actions/github-script@v6 - - if: github.event_name == 'pull_request' - - env: - PLAN: "terraform\n${{ steps.plan.outputs.stdout }}" - - with: - github-token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }} # Must have pull request write perms. - script: | - const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\` - #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` - #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` - #### Terraform Plan 📖\`${{ steps.plan.outcome }}\` - Show Plan - \`\`\`\n - ${process.env.PLAN} - \`\`\` - - *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`; - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: output - }) - - - name: Terraform plan status - if: steps.plan.outcome == 'failure' - run: exit 1 - - - name: List workspaces - run: ls workspaces - - - name: Terraform Apply # Only runs if pushed - if: github.event_name != 'pull_request' - run: terraform apply -auto-approve -input=false -var-file="workspaces/source-ec2-$ENVIRONMENT.tfvars" diff --git a/.github/workflows/detect-change.yml b/.github/workflows/detect-change.yml index 65238e78da..b6272c21cd 100644 --- a/.github/workflows/detect-change.yml +++ b/.github/workflows/detect-change.yml @@ -12,10 +12,13 @@ name: Detect Change Workflow on: pull_request: + branches: + - master + - develop push: tags: - - v* + - 'v[0-9]+.[0-9]+.[0-9]+' branches: - master - develop @@ -33,7 +36,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" check-latest: true - name: Build dependencies diff --git a/.github/workflows/lint-then-benchmark.yml b/.github/workflows/lint-then-benchmark.yml index 9c1bdf42eb..015c8725c2 100644 --- a/.github/workflows/lint-then-benchmark.yml +++ b/.github/workflows/lint-then-benchmark.yml @@ -15,7 +15,7 @@ on: push: tags: - - v* + - 'v[0-9]+.[0-9]+.[0-9]+' branches: - master - develop @@ -57,7 +57,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" 
+ go-version: "1.20" check-latest: true - name: Run the golangci-lint diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index e36f2ed49b..df2af79dd0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -36,7 +36,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" check-latest: true - name: Check linting through golangci-lint @@ -46,7 +46,7 @@ jobs: # Required: the version of golangci-lint is required. # Note: The version should not pick the patch version as the latest patch # version is what will always be used. - version: v1.51 + version: v1.53 # Optional: working directory, useful for monorepos or if we wanted to run this # on a non-root directory. diff --git a/.github/workflows/preview-ami-with-terraform-plan.yml b/.github/workflows/preview-ami-with-terraform-plan.yml new file mode 100644 index 0000000000..ed2fef6f0c --- /dev/null +++ b/.github/workflows/preview-ami-with-terraform-plan.yml @@ -0,0 +1,135 @@ +# Copyright 2023 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +name: Preview AMI With Terraform Plan Workflow + +on: + pull_request: + branches: + - master + - develop + paths: + - '.github/workflows/preview-ami-with-terraform-plan.yml' + - '.github/workflows/build-then-deploy-ami.yml' + - 'tools/cloud/aws/**' + + +env: + # Verbosity setting for terraform logs (has to be named `TF_LOG`). + TF_LOG: INFO + + # Directory containing terraform config files. + TF_DIR: 'tools/cloud/aws/terraform' + + # Set environment type: dev, test, prod + ENVIRONMENT_TYPE: "dev" + + # Even though we don't see these being used directly, terraform needs these set. + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_AMI_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_AMI_SECRET_ACCESS_KEY }} + + +jobs: + preview-ami-with-terraform-plan: + name: Preview ami with terraform plan job + runs-on: ubuntu-latest + + defaults: + run: + working-directory: ${{ env.TF_DIR }} + + steps: + - name: Stop and notify the use of unprivileged flow or missing tokens + if: env.AWS_ACCESS_KEY_ID == '' || env.AWS_SECRET_ACCESS_KEY == '' + # Note: Fail this step, as we don't want unprivileged access doing these changes. + uses: actions/github-script@v6 + with: + script: | + let unprivileged_warning = + 'Warning: you made changes to files that require privileged access, this means' + + ' you are either using the fork-flow, or are missing some secrets.\n' + + 'Solution: please use branch-flow, or add the missing secrets. 
If you are not' + + ' an internal developer, please reach out to a maintainer for assistance.\n' + + 'Note: the files that were changed also require manual testing' + + ' using our organization AWS account, and using manual triggers on' + + ' some of our workflows (that are not triggered normally).\n' + + 'Pushed by: @${{ github.actor }}, SHA: \`${{ github.event.pull_request.head.sha }}\`\n'; + core.setFailed(unprivileged_warning) + + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Terraform action setup + uses: hashicorp/setup-terraform@v2 + with: + terraform_version: 1.3.7 + + - name: Terraform format + id: terraform-format + run: terraform fmt -check + + - name: Terraform initialization + id: terraform-initialization + run: terraform init -backend-config="workspaces/${ENVIRONMENT_TYPE}-backend.conf" + + - name: Terraform workspace + # Select workspace if it exists, otherwise create a new workspace. + run: terraform workspace select ${ENVIRONMENT_TYPE} || terraform workspace new ${ENVIRONMENT_TYPE} + + - name: Terraform validation + id: terraform-validation + run: terraform validate -no-color + + - name: Terraform plan + id: terraform-plan + run: terraform plan -no-color -input=false -var-file="workspaces/source-ec2-${ENVIRONMENT_TYPE}.tfvars" + continue-on-error: true + + - name: Comment results on pull request + uses: actions/github-script@v6 + env: + TERRAFORM_PLAN_OUTPUT: "Terraform Plan Output:\n${{ steps.terraform-plan.outputs.stdout }}\n" + + with: + github-token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }} # Must have pull request write perms. + script: | + const terraform_plan_output = ` + #### Terraform Format and Style \`${{ steps.terraform-format.outcome }}\` + #### Terraform Initialization \`${{ steps.terraform-initialization.outcome }}\` + #### Terraform Validation \`${{ steps.terraform-validation.outcome }}\` + #### Terraform Plan \`${{ steps.terraform-plan.outcome }}\` + + + + Show Plan + + \`\`\`\n + ${process.env.TERRAFORM_PLAN_OUTPUT} + \`\`\`\n + + + + ***Pushed By: @${{ github.actor }}*** + ***SHA: \`${{ github.event.pull_request.head.sha }}\`*** + `; + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: terraform_plan_output + }) + + - name: Terraform plan failure + if: steps.terraform-plan.outcome == 'failure' + run: exit 1 + + - name: List workspaces + run: ls workspaces diff --git a/.github/workflows/pull-docker-image.yml b/.github/workflows/pull-docker-image.yml new file mode 100644 index 0000000000..eb0170b7ef --- /dev/null +++ b/.github/workflows/pull-docker-image.yml @@ -0,0 +1,51 @@ +# Copyright 2023 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow validates that the images pushed to the container +# registries can be pulled then run sucessfully. +name: Pull Docker Image Workflow + +on: + workflow_run: + # Warning: this workflow must NOT: + # - interact with any new code. + # - checkout new code. + # - build/compile anything (only pull). + # - make any indirect calls (i.e. make xyz, or npm install, etc.) + # Note this workflow: + # - will use the base's (or default) workflow file's state. 
+ # - doesn't run on the PR or the branch coming in, it runs on the default branch. + # - has read-write repo token + # - has access to secrets + workflows: ["Push Docker Image To Registries Workflow"] + types: + - completed + +jobs: + pull-docker-image: + name: Pull docker image job + + if: ${{ github.event.workflow_run.conclusion == 'success' }} + + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + image_tag: + - sourcenetwork/defradb:latest + - ghcr.io/sourcenetwork/defradb:latest + + steps: + - name: Pull Docker image + run: docker pull ${{ matrix.image_tag }} + + - name: Test Docker image + run: docker run --rm ${{ matrix.image_tag }} diff --git a/.github/workflows/push-docker-image-to-registries.yml b/.github/workflows/push-docker-image-to-registries.yml new file mode 100644 index 0000000000..d7d00d14aa --- /dev/null +++ b/.github/workflows/push-docker-image-to-registries.yml @@ -0,0 +1,83 @@ +# Copyright 2023 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow builds a Docker container image, if the build is successful +# then it will deploy the image to DockerHub & GitHub container registries. +name: Push Docker Image To Registries Workflow + +on: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + +env: + TEST_TAG: sourcenetwork/defradb:test + +jobs: + push-docker-image-to-registries: + name: Push Docker image to registries job + + runs-on: ubuntu-latest + + permissions: + packages: write + contents: read + + steps: + - name: Check out the repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build Docker image + uses: docker/build-push-action@v4 + with: + context: . + file: tools/defradb.containerfile + load: true + tags: ${{ env.TEST_TAG }} + labels: ${{ steps.meta.outputs.labels }} + + - name: Test Docker image + run: docker run --rm ${{ env.TEST_TAG }} + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Log in to the Container registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v4 + with: + images: | + sourcenetwork/defradb + ghcr.io/${{ github.repository }} + + - name: Push Docker images + uses: docker/build-push-action@v4 + with: + context: . 
+ file: tools/defradb.containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 1bab3e5a05..bfa696a283 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -12,6 +12,9 @@ name: Run Tests Workflow on: pull_request: + branches: + - master + - develop push: @@ -28,7 +31,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" check-latest: true - name: Build dependencies diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml index 97db056df9..267466b8a3 100644 --- a/.github/workflows/start-binary.yml +++ b/.github/workflows/start-binary.yml @@ -12,10 +12,13 @@ name: Start Binary Workflow on: pull_request: + branches: + - master + - develop push: tags: - - v* + - 'v[0-9]+.[0-9]+.[0-9]+' branches: - master - develop @@ -34,7 +37,7 @@ jobs: - name: Setup Go environment explicitly uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" check-latest: true - name: Build modules diff --git a/.github/workflows/validate-containerfile.yml b/.github/workflows/validate-containerfile.yml new file mode 100644 index 0000000000..b3315861ad --- /dev/null +++ b/.github/workflows/validate-containerfile.yml @@ -0,0 +1,57 @@ +# Copyright 2023 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +# This workflow tests that the container build is successful and +# that the built container runs successfully. +name: Validate Containerfile Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +env: + TEST_TAG: sourcenetwork/defradb:test + +jobs: + validate-containerfile: + name: Validate containerfile job + + runs-on: ubuntu-latest + + steps: + - name: Check out the repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build Docker image + uses: docker/build-push-action@v4 + with: + context: . + file: tools/defradb.containerfile + load: true + tags: ${{ env.TEST_TAG }} + + - name: Test Docker image + run: docker run --rm ${{ env.TEST_TAG }} + diff --git a/.gitignore b/.gitignore index 826d4d912b..b19a6d9259 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,16 @@ coverage.txt tests/bench/*.log tests/bench/*.svg +tests/lenses/rust_wasm32_set_default/Cargo.lock +tests/lenses/rust_wasm32_set_default/target +tests/lenses/rust_wasm32_set_default/pkg +tests/lenses/rust_wasm32_remove/Cargo.lock +tests/lenses/rust_wasm32_remove/target +tests/lenses/rust_wasm32_remove/pkg +tests/lenses/rust_wasm32_copy/Cargo.lock +tests/lenses/rust_wasm32_copy/target +tests/lenses/rust_wasm32_copy/pkg + # Ignore OS X metadata files. .history **.DS_Store diff --git a/CHANGELOG.md b/CHANGELOG.md index da957eb28f..3638d3ef75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,88 @@ + + +## [v0.6.0](https://github.com/sourcenetwork/defradb/compare/v0.5.1...v0.6.0) + +> 2023-07-31 + +DefraDB v0.6 is a major pre-production release. 
Until the stable version 1.0 is reached, the SemVer minor version number will denote notable releases, which will give the project the freedom to experiment and explore potentially breaking changes.
+
+There are several new and powerful features, important bug fixes, and notable refactors in this release. Highlight features include the initial release of our LensVM-based schema migration engine powered by WebAssembly ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)), the newly embedded DefraDB Playground, which includes a bundled GraphQL client and schema manager, and, last but not least, a relation field (_id) alias to improve the developer experience ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)).
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/.
+
+### Features
+
+* Add `_not` operator ([#1631](https://github.com/sourcenetwork/defradb/issues/1631))
+* Schema list API ([#1625](https://github.com/sourcenetwork/defradb/issues/1625))
+* Add simple data import and export ([#1630](https://github.com/sourcenetwork/defradb/issues/1630))
+* Playground ([#1575](https://github.com/sourcenetwork/defradb/issues/1575))
+* Add schema migration get and set cmds to CLI ([#1650](https://github.com/sourcenetwork/defradb/issues/1650))
+* Allow relation alias on create and update ([#1609](https://github.com/sourcenetwork/defradb/issues/1609))
+* Make fetcher calculate docFetches and fieldFetches ([#1713](https://github.com/sourcenetwork/defradb/issues/1713))
+* Add lens migration engine to defra ([#1564](https://github.com/sourcenetwork/defradb/issues/1564))
+* Add `_keys` attribute to `selectNode` simple explain ([#1546](https://github.com/sourcenetwork/defradb/issues/1546))
+* CLI commands for secondary indexes ([#1595](https://github.com/sourcenetwork/defradb/issues/1595))
+* Add alias to `groupBy` related object ([#1579](https://github.com/sourcenetwork/defradb/issues/1579))
+* Non-unique secondary index (no querying) ([#1450](https://github.com/sourcenetwork/defradb/issues/1450))
+* Add ability to explain-debug all nodes ([#1563](https://github.com/sourcenetwork/defradb/issues/1563))
+* Include dockey in doc exists err ([#1558](https://github.com/sourcenetwork/defradb/issues/1558))
+
+### Fixes
+
+* Better wait in CLI integration test ([#1415](https://github.com/sourcenetwork/defradb/issues/1415))
+* Return error when relation is not defined on both types ([#1647](https://github.com/sourcenetwork/defradb/issues/1647))
+* Change `core.DocumentMapping` to pointer ([#1528](https://github.com/sourcenetwork/defradb/issues/1528))
+* Fix invalid (badger) datastore state ([#1685](https://github.com/sourcenetwork/defradb/issues/1685))
+* Discard index and subscription implicit transactions ([#1715](https://github.com/sourcenetwork/defradb/issues/1715))
+* Remove duplicated `peers` in peerstore prefix ([#1678](https://github.com/sourcenetwork/defradb/issues/1678))
+* Return errors from typeJoinOne ([#1716](https://github.com/sourcenetwork/defradb/issues/1716))
+* Document change detector breaking change ([#1531](https://github.com/sourcenetwork/defradb/issues/1531))
+* Standardise `schema migration` CLI errors ([#1682](https://github.com/sourcenetwork/defradb/issues/1682))
+* Introspection OrderArg
returns null inputFields ([#1633](https://github.com/sourcenetwork/defradb/issues/1633)) +* Avoid duplicated requestable fields ([#1621](https://github.com/sourcenetwork/defradb/issues/1621)) +* Normalize int field kind ([#1619](https://github.com/sourcenetwork/defradb/issues/1619)) +* Change the WriteSyncer to use lock when piping ([#1608](https://github.com/sourcenetwork/defradb/issues/1608)) +* Filter splitting and rendering for related types ([#1541](https://github.com/sourcenetwork/defradb/issues/1541)) + +### Documentation + +* Improve CLI command documentation ([#1505](https://github.com/sourcenetwork/defradb/issues/1505)) + +### Refactoring + +* Schema list output to include schemaVersionID ([#1706](https://github.com/sourcenetwork/defradb/issues/1706)) +* Reuse lens wasm modules ([#1641](https://github.com/sourcenetwork/defradb/issues/1641)) +* Remove redundant txn param from fetcher start ([#1635](https://github.com/sourcenetwork/defradb/issues/1635)) +* Remove first CRDT byte from field encoded values ([#1622](https://github.com/sourcenetwork/defradb/issues/1622)) +* Merge `node` into `net` and improve coverage ([#1593](https://github.com/sourcenetwork/defradb/issues/1593)) +* Fetcher filter and field optimization ([#1500](https://github.com/sourcenetwork/defradb/issues/1500)) + +### Testing + +* Rework transaction test framework capabilities ([#1603](https://github.com/sourcenetwork/defradb/issues/1603)) +* Expand backup integration tests ([#1699](https://github.com/sourcenetwork/defradb/issues/1699)) +* Disable test ([#1675](https://github.com/sourcenetwork/defradb/issues/1675)) +* Add tests for 1-1 group by id ([#1655](https://github.com/sourcenetwork/defradb/issues/1655)) +* Remove CLI tests from make test ([#1643](https://github.com/sourcenetwork/defradb/issues/1643)) +* Bundle test state into single var ([#1645](https://github.com/sourcenetwork/defradb/issues/1645)) +* Convert explain group tests to new explain setup ([#1537](https://github.com/sourcenetwork/defradb/issues/1537)) +* Add tests for foo_id field name clashes ([#1521](https://github.com/sourcenetwork/defradb/issues/1521)) +* Resume wait correctly following test node restart ([#1515](https://github.com/sourcenetwork/defradb/issues/1515)) +* Require no errors when none expected ([#1509](https://github.com/sourcenetwork/defradb/issues/1509)) + +### Continuous integration + +* Add workflows to push, pull, and validate docker images ([#1676](https://github.com/sourcenetwork/defradb/issues/1676)) +* Build mocks using make ([#1612](https://github.com/sourcenetwork/defradb/issues/1612)) +* Fix terraform plan and merge AMI build + deploy workflow ([#1514](https://github.com/sourcenetwork/defradb/issues/1514)) +* Reconfigure CodeCov action to ensure stability ([#1414](https://github.com/sourcenetwork/defradb/issues/1414)) + +### Chore + +* Bump to GoLang v1.20 ([#1689](https://github.com/sourcenetwork/defradb/issues/1689)) +* Update to ipfs boxo 0.10.0 ([#1573](https://github.com/sourcenetwork/defradb/issues/1573)) + + ## [v0.5.1](https://github.com/sourcenetwork/defradb/compare/v0.5.0...v0.5.1) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fedad36f7e..c7cfb9b590 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -55,6 +55,13 @@ Run the following commands for testing: - `make bench` to run the benchmark suite. 
To compare a branch's results with the `develop` branch results, execute the suite on both branches, output the results to files, and compare them with a tool like `benchstat` (e.g., `benchstat develop.txt current.txt`). To install `benchstat`, use `make deps:bench`. - `make test:changes` to run a test suite detecting breaking changes. Accompany breaking changes with documentation in `docs/data_format_changes/` for the test to pass. +### Test prerequisites + +The following tools are required in order to build and run the tests within this repository: + +- [Go](https://go.dev/doc/install) +- Cargo/rustc, typically installed via [rustup](https://www.rust-lang.org/tools/install) + ## Documentation The overall project documentation can be found at [docs.source.network](https://docs.source.network), and its source at [github.com/sourcenetwork/docs.source.network](https://github.com/sourcenetwork/docs.source.network). diff --git a/Makefile b/Makefile index 78ba5ceced..0e79f59646 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,16 @@ BUILD_FLAGS=-trimpath -ldflags "\ -X 'github.com/sourcenetwork/defradb/version.GitCommitDate=$(VERSION_GITCOMMITDATE)'" endif -TEST_FLAGS=-race -shuffle=on -timeout 60s +ifdef BUILD_TAGS +BUILD_FLAGS+=-tags $(BUILD_TAGS) +endif + +TEST_FLAGS=-race -shuffle=on -timeout 150s + +PLAYGROUND_DIRECTORY=playground +LENS_TEST_DIRECTORY=tests/integration/schema/migrations +CLI_TEST_DIRECTORY=tests/integration/cli +DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY) -e $(CLI_TEST_DIRECTORY)) default: @go run $(BUILD_FLAGS) cmd/defradb/main.go @@ -67,15 +76,21 @@ client\:add-schema: .PHONY: deps\:lint deps\:lint: - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53 .PHONY: deps\:test deps\:test: go install gotest.tools/gotestsum@latest +.PHONY: deps\:lens +deps\:lens: + rustup target add wasm32-unknown-unknown + @$(MAKE) -C ./tests/lenses build + .PHONY: deps\:coverage deps\:coverage: go install github.com/ory/go-acc@latest + @$(MAKE) deps:lens .PHONY: deps\:bench deps\:bench: @@ -89,6 +104,14 @@ deps\:chglog: deps\:modules: go mod download +.PHONY: deps\:mock +deps\:mock: + go install github.com/vektra/mockery/v2@v2.30.1 + +.PHONY: deps\:playground +deps\:playground: + cd $(PLAYGROUND_DIRECTORY) && npm install && npm run build + .PHONY: deps deps: @$(MAKE) deps:modules && \ @@ -96,7 +119,22 @@ deps: $(MAKE) deps:chglog && \ $(MAKE) deps:coverage && \ $(MAKE) deps:lint && \ - $(MAKE) deps:test + $(MAKE) deps:test && \ + $(MAKE) deps:mock + +.PHONY: mock +mock: + @$(MAKE) deps:mock + mockery --dir ./client --output ./client/mocks --name DB --with-expecter + mockery --dir ./client --output ./client/mocks --name Collection --with-expecter + mockery --dir ./datastore --output ./datastore/mocks --name DAGStore --with-expecter + mockery --dir ./datastore --output ./datastore/mocks --name DSReaderWriter --with-expecter + mockery --srcpkg github.com/ipfs/go-datastore/query --output ./datastore/mocks --name Results --with-expecter + mockery --dir ./datastore --output ./datastore/mocks --name RootStore --with-expecter + mockery --dir ./datastore --output ./datastore/mocks --name Txn --with-expecter + mockery --dir ./datastore --output ./datastore/mocks --name DAGStore --with-expecter + mockery --dir ./db/fetcher --output ./db/fetcher/mocks --name Fetcher --with-expecter + mockery --dir ./db/fetcher --output ./db/fetcher/mocks --name EncodedDocument --with-expecter .PHONY: 
dev\:start dev\:start: @@ -120,7 +158,7 @@ verify: .PHONY: tidy tidy: - go mod tidy -go=1.19 + go mod tidy -go=1.20 .PHONY: clean clean: @@ -145,32 +183,42 @@ endif .PHONY: test test: - gotestsum --format pkgname -- ./... $(TEST_FLAGS) + gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) + +.PHONY: test\:quick +test\:quick: + gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) # Only build the tests (don't execute them). .PHONY: test\:build test\:build: - gotestsum --format pkgname -- ./... $(TEST_FLAGS) -run=nope + gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) -run=nope .PHONY: test\:ci test\:ci: - DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true $(MAKE) test:names + DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true $(MAKE) test:all .PHONY: test\:go test\:go: - go test ./... $(TEST_FLAGS) + go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) .PHONY: test\:names test\:names: - gotestsum --format testname -- ./... $(TEST_FLAGS) + gotestsum --format testname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) + +.PHONY: test\:all +test\:all: + @$(MAKE) test:names + @$(MAKE) test:lens + @$(MAKE) test:cli .PHONY: test\:verbose test\:verbose: - gotestsum --format standard-verbose -- ./... $(TEST_FLAGS) + gotestsum --format standard-verbose -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) .PHONY: test\:watch test\:watch: - gotestsum --watch -- ./... + gotestsum --watch -- $(DEFAULT_TEST_DIRECTORIES) .PHONY: test\:clean test\:clean: @@ -188,6 +236,16 @@ test\:bench-short: test\:scripts: @$(MAKE) -C ./tools/scripts/ test +.PHONY: test\:lens +test\:lens: + @$(MAKE) deps:lens + gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... $(TEST_FLAGS) + +.PHONY: test\:cli +test\:cli: + @$(MAKE) deps:lens + gotestsum --format testname -- ./$(CLI_TEST_DIRECTORY)/... $(TEST_FLAGS) + # Using go-acc to ensure integration tests are included. # Usage: `make test:coverage` or `make test:coverage path="{pathToPackage}"` # Example: `make test:coverage path="./api/..."` @@ -195,10 +253,10 @@ test\:scripts: test\:coverage: @$(MAKE) deps:coverage ifeq ($(path),) - go-acc ./... --output=coverage.txt --covermode=atomic -- -coverpkg=./... + go-acc ./... --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=./... @echo "Show coverage information for each function in ./..." else - go-acc $(path) --output=coverage.txt --covermode=atomic -- -coverpkg=$(path) + go-acc $(path) --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=$(path) @echo "Show coverage information for each function in" path=$(path) endif go tool cover -func coverage.txt | grep total | awk '{print $$3}' @@ -214,6 +272,7 @@ test\:coverage-html: .PHONY: test\:changes test\:changes: + @$(MAKE) deps:lens env DEFRA_DETECT_DATABASE_CHANGES=true gotestsum -- ./... -shuffle=on -p 1 .PHONY: validate\:codecov diff --git a/README.md b/README.md index f1f1925c83..8428ebc77f 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,8 @@ Read the documentation on [docs.source.network](https://docs.source.network/). 
- [Collection subscription example](#collection-subscription-example) - [Replicator example](#replicator-example) - [Securing the HTTP API with TLS](#securing-the-http-api-with-tls) +- [Supporting CORS](#supporting-cors) +- [Backing up and restoring](#backing-up-and-restoring) - [Licensing](#licensing) - [Contributors](#contributors) @@ -272,7 +274,7 @@ About the flags: - `--rootdir` specifies the root dir (config and data) to use - `--url` is the address to listen on for the client HTTP and GraphQL API -- `--p2paddr` is the multiaddress for the p2p networking to listen on +- `--p2paddr` is the multiaddress for the P2P networking to listen on - `--tcpaddr` is the multiaddress for the gRPC server to listen on - `--peers` is a comma-separated list of peer multiaddresses @@ -387,6 +389,25 @@ defradb start --allowed-origins=http://localhost:3000 The catch-all `*` is also a valid origin. +## Backing up and restoring + +It is currently not possible to do a full backup of DefraDB that includes the history of changes through the Merkle DAG. However, DefraDB currently supports a simple backup of the current data state in JSON format that can be used to seed a database or help with transitioning from one DefraDB version to another. + +To backup the data, run the following command: +```shell +defradb client backup export path/to/backup.json +``` + +To pretty print the JSON content when exporting, run the following command: +```shell +defradb client backup export --pretty path/to/backup.json +``` + +To restore the data, run the following command: +```shell +defradb client backup import path/to/backup.json +``` + ## Community Discuss on [Discord](https://discord.source.network/) or [Github Discussions](https://github.com/sourcenetwork/defradb/discussions). The Source project is on [Twitter](https://twitter.com/sourcenetwrk). diff --git a/api/http/errors.go b/api/http/errors.go index 91a235543d..4acf9abd25 100644 --- a/api/http/errors.go +++ b/api/http/errors.go @@ -36,6 +36,8 @@ var ( ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") ErrStreamingUnsupported = errors.New("streaming unsupported") ErrNoEmail = errors.New("email address must be specified for tls with autocert") + ErrPayloadFormat = errors.New("invalid payload format") + ErrMissingNewKey = errors.New("missing _newKey for imported doc") ) // ErrorResponse is the GQL top level object holding error items for the response payload. diff --git a/api/http/handler.go b/api/http/handler.go index 6ab802df12..aa7b828f29 100644 --- a/api/http/handler.go +++ b/api/http/handler.go @@ -18,6 +18,7 @@ import ( "net/http" "github.com/go-chi/chi/v5" + "github.com/go-chi/cors" "github.com/pkg/errors" "github.com/sourcenetwork/defradb/client" @@ -68,25 +69,38 @@ func simpleDataResponse(args ...any) DataResponse { // newHandler returns a handler with the router instantiated. 
func newHandler(db client.DB, opts serverOptions) *handler { + mux := chi.NewRouter() + mux.Use(loggerMiddleware) + + if len(opts.allowedOrigins) != 0 { + mux.Use(cors.Handler(cors.Options{ + AllowedOrigins: opts.allowedOrigins, + AllowedMethods: []string{"GET", "POST", "PATCH", "OPTIONS"}, + AllowedHeaders: []string{"Content-Type"}, + MaxAge: 300, + })) + } + + mux.Use(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if opts.tls.HasValue() { + rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") + } + ctx := context.WithValue(req.Context(), ctxDB{}, db) + if opts.peerID != "" { + ctx = context.WithValue(ctx, ctxPeerID{}, opts.peerID) + } + next.ServeHTTP(rw, req.WithContext(ctx)) + }) + }) + return setRoutes(&handler{ + Mux: mux, db: db, options: opts, }) } -func (h *handler) handle(f http.HandlerFunc) http.HandlerFunc { - return func(rw http.ResponseWriter, req *http.Request) { - if h.options.tls.HasValue() { - rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - } - ctx := context.WithValue(req.Context(), ctxDB{}, h.db) - if h.options.peerID != "" { - ctx = context.WithValue(ctx, ctxPeerID{}, h.options.peerID) - } - f(rw, req.WithContext(ctx)) - } -} - func getJSON(req *http.Request, v any) error { err := json.NewDecoder(req.Body).Decode(v) if err != nil { diff --git a/api/http/handlerfuncs.go b/api/http/handlerfuncs.go index d6e90af778..9e5b212fe3 100644 --- a/api/http/handlerfuncs.go +++ b/api/http/handlerfuncs.go @@ -24,9 +24,10 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/multiformats/go-multihash" - "github.com/pkg/errors" + "github.com/sourcenetwork/defradb/client" corecrdt "github.com/sourcenetwork/defradb/core/crdt" + "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" ) @@ -52,7 +53,7 @@ func pingHandler(rw http.ResponseWriter, req *http.Request) { sendJSON( req.Context(), rw, - simpleDataResponse("response", "pong", "test"), + simpleDataResponse("response", "pong"), http.StatusOK, ) } @@ -155,9 +156,65 @@ func execGQLHandler(rw http.ResponseWriter, req *http.Request) { sendJSON(req.Context(), rw, newGQLResult(result.GQL), http.StatusOK) } +type fieldResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Kind string `json:"kind"` + Internal bool `json:"internal"` +} + type collectionResponse struct { - Name string `json:"name"` - ID string `json:"id"` + Name string `json:"name"` + ID string `json:"id"` + VersionID string `json:"version_id"` + Fields []fieldResponse `json:"fields,omitempty"` +} + +func listSchemaHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + cols, err := db.GetAllCollections(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + colResp := make([]collectionResponse, len(cols)) + for i, col := range cols { + var fields []fieldResponse + for _, field := range col.Schema().Fields { + fieldRes := fieldResponse{ + ID: field.ID.String(), + Name: field.Name, + Internal: field.IsInternal(), + } + if field.IsObjectArray() { + fieldRes.Kind = fmt.Sprintf("[%s]", field.Schema) + } else if field.IsObject() { + fieldRes.Kind = field.Schema + } else { + fieldRes.Kind = field.Kind.String() + } + fields = append(fields, fieldRes) + } + colResp[i] = 
collectionResponse{ + Name: col.Name(), + ID: col.SchemaID(), + VersionID: col.Schema().VersionID, + Fields: fields, + } + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("collections", colResp), + http.StatusOK, + ) } func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) { @@ -187,8 +244,9 @@ func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) { return } colResp[i] = collectionResponse{ - Name: col.Name(), - ID: col.SchemaID(), + Name: col.Name(), + ID: col.SchemaID(), + VersionID: col.Schema().VersionID, } } @@ -227,6 +285,73 @@ func patchSchemaHandler(rw http.ResponseWriter, req *http.Request) { ) } +func setMigrationHandler(rw http.ResponseWriter, req *http.Request) { + cfgStr, err := readWithLimit(req.Body, rw) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + txn, err := db.NewTxn(req.Context(), false) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + var cfg client.LensConfig + err = json.Unmarshal(cfgStr, &cfg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + err = db.LensRegistry().SetMigration(req.Context(), txn, cfg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + err = txn.Commit(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("result", "success"), + http.StatusOK, + ) +} + +func getMigrationHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + cfgs := db.LensRegistry().Config() + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("configuration", cfgs), + http.StatusOK, + ) +} + func getBlockHandler(rw http.ResponseWriter, req *http.Request) { cidStr := chi.URLParam(req, "cid") diff --git a/api/http/handlerfuncs_backup.go b/api/http/handlerfuncs_backup.go new file mode 100644 index 0000000000..3961263995 --- /dev/null +++ b/api/http/handlerfuncs_backup.go @@ -0,0 +1,123 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package http + +import ( + "context" + "net/http" + "os" + "strings" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/errors" +) + +func exportHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + cfg := &client.BackupConfig{} + err = getJSON(req, cfg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + err = validateBackupConfig(req.Context(), cfg, db) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + err = db.BasicExport(req.Context(), cfg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("result", "success"), + http.StatusOK, + ) +} + +func importHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + cfg := &client.BackupConfig{} + err = getJSON(req, cfg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + err = validateBackupConfig(req.Context(), cfg, db) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + err = db.BasicImport(req.Context(), cfg.Filepath) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("result", "success"), + http.StatusOK, + ) +} + +func validateBackupConfig(ctx context.Context, cfg *client.BackupConfig, db client.DB) error { + if !isValidPath(cfg.Filepath) { + return errors.New("invalid file path") + } + + if cfg.Format != "" && strings.ToLower(cfg.Format) != "json" { + return errors.New("only JSON format is supported at the moment") + } + for _, colName := range cfg.Collections { + _, err := db.GetCollectionByName(ctx, colName) + if err != nil { + return errors.Wrap("collection does not exist", err) + } + } + return nil +} + +func isValidPath(filepath string) bool { + // if a file exists, return true + if _, err := os.Stat(filepath); err == nil { + return true + } + + // if not, attempt to write to the path and if successful, + // remove the file and return true + var d []byte + if err := os.WriteFile(filepath, d, 0o644); err == nil { + _ = os.Remove(filepath) + return true + } + + return false +} diff --git a/api/http/handlerfuncs_backup_test.go b/api/http/handlerfuncs_backup_test.go new file mode 100644 index 0000000000..67af6015a1 --- /dev/null +++ b/api/http/handlerfuncs_backup_test.go @@ -0,0 +1,623 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package http + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/mocks" + "github.com/sourcenetwork/defradb/errors" +) + +func TestExportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: nil, + Method: "POST", + Path: ExportPath, + Body: nil, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithWrongPayload_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + buf := bytes.NewBuffer([]byte("[]")) + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "json: cannot unmarshal array into Go value of type client.BackupConfig") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithInvalidFilePath_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/some/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "invalid file path", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithInvalidFomat_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Format: "csv", + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "only JSON format is supported at the moment") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + 
require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "only JSON format is supported at the moment", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithInvalidCollection_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Format: "json", + Collections: []string{"invalid"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "collection does not exist: datastore: key not found") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "collection does not exist: datastore: key not found", errResponse.Errors[0].Message) +} + +func TestExportHandler_WithBasicExportError_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + db := mocks.NewDB(t) + testError := errors.New("test error") + db.EXPECT().BasicExport(mock.Anything, mock.Anything).Return(testError) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: db, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "test error") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "test error", errResponse.Errors[0].Message) +} + +func TestExportHandler_AllCollections_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + }) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, + string(b), + ) +} + +func TestExportHandler_UserCollection_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + 
require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Collections: []string{"User"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + }) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, + string(b), + ) +} + +func TestExportHandler_UserCollectionWithModifiedDoc_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + err = doc.Set("points", 1000) + require.NoError(t, err) + + err = col.Update(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + Collections: []string{"User"}, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + respBody := testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ExportPath, + Body: buf, + ExpectedStatus: 200, + }) + + b, err = os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"data":{"result":"success"}}`, + string(respBody), + ) + + require.Equal( + t, + `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-36697142-d46a-57b1-b25e-6336706854ea","age":31,"name":"Bob","points":1000,"verified":true}]}`, + string(b), + ) +} + +func TestImportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: nil, + Method: "POST", + Path: ImportPath, + Body: nil, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) +} + +func TestImportHandler_WithWrongPayloadFormat_UnmarshalError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + buf := bytes.NewBuffer([]byte(`[]`)) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains( + t, + errResponse.Errors[0].Extensions.Stack, + "json: cannot unmarshal array into Go value of type client.BackupConfig", + ) + require.Equal(t, http.StatusBadRequest, 
errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( + t, + "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", + errResponse.Errors[0].Message, + ) +} + +func TestImportHandler_WithInvalidFilepath_ReturnError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/some/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 400, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "invalid file path", errResponse.Errors[0].Message) +} + +func TestImportHandler_WithDBClosed_DatastoreClosedError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + cfg := client.BackupConfig{ + Filepath: filepath, + } + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "datastore closed") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "datastore closed", errResponse.Errors[0].Message) +} + +func TestImportHandler_WithUnknownCollection_KeyNotFoundError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + filepath := t.TempDir() + "/test.json" + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "failed to get collection: datastore: key not found. Name: User") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "failed to get collection: datastore: key not found. 
Name: User", errResponse.Errors[0].Message) +} + +func TestImportHandler_UserCollection_NoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + filepath := t.TempDir() + "/test.json" + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + resp := DataResponse{} + _ = testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + Body: buf, + ExpectedStatus: 200, + ResponseData: &resp, + }) + + switch v := resp.Data.(type) { + case map[string]any: + require.Equal(t, "success", v["result"]) + default: + t.Fatalf("data should be of type map[string]any but got %T", resp.Data) + } + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + importedDoc, err := col.Get(ctx, doc.Key(), false) + require.NoError(t, err) + + require.Equal(t, doc.Key().String(), importedDoc.Key().String()) +} + +func TestImportHandler_WithExistingDoc_DocumentExistError(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + testLoadSchema(t, ctx, defra) + + col, err := defra.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + err = os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), + 0644, + ) + require.NoError(t, err) + + cfg := client.BackupConfig{ + Filepath: filepath, + } + + b, err := json.Marshal(cfg) + require.NoError(t, err) + buf := bytes.NewBuffer(b) + + errResponse := ErrorResponse{} + _ = testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "POST", + Path: ImportPath, + QueryParams: map[string]string{"collections": "User"}, + Body: buf, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + + require.Contains( + t, + errResponse.Errors[0].Extensions.Stack, + "failed to save a new doc to collection: a document with the given dockey already exists", + ) + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( + t, + "failed to save a new doc to collection: a document with the given dockey already exists. DocKey: bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab", + errResponse.Errors[0].Message, + ) +} diff --git a/api/http/handlerfuncs_index.go b/api/http/handlerfuncs_index.go new file mode 100644 index 0000000000..e8d10d900e --- /dev/null +++ b/api/http/handlerfuncs_index.go @@ -0,0 +1,144 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "net/http" + "strings" + + "github.com/sourcenetwork/defradb/client" +) + +func createIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + var data map[string]string + err = getJSON(req, &data) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + colNameArg := data["collection"] + fieldsArg := data["fields"] + indexNameArg := data["name"] + + col, err := db.GetCollectionByName(req.Context(), colNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + fields := strings.Split(fieldsArg, ",") + fieldDescriptions := make([]client.IndexedFieldDescription, 0, len(fields)) + for _, field := range fields { + fieldDescriptions = append(fieldDescriptions, client.IndexedFieldDescription{Name: field}) + } + indexDesc := client.IndexDescription{ + Name: indexNameArg, + Fields: fieldDescriptions, + } + indexDesc, err = col.CreateIndex(req.Context(), indexDesc) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("index", indexDesc), + http.StatusOK, + ) +} + +func dropIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + var data map[string]string + err = getJSON(req, &data) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusBadRequest) + return + } + + colNameArg := data["collection"] + indexNameArg := data["name"] + + col, err := db.GetCollectionByName(req.Context(), colNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + err = col.DropIndex(req.Context(), indexNameArg) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + sendJSON( + req.Context(), + rw, + simpleDataResponse("result", "success"), + http.StatusOK, + ) +} + +func listIndexHandler(rw http.ResponseWriter, req *http.Request) { + db, err := dbFromContext(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + + queryParams := req.URL.Query() + collectionParam := queryParams.Get("collection") + + if collectionParam == "" { + indexesPerCol, err := db.GetAllIndexes(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + sendJSON( + req.Context(), + rw, + simpleDataResponse("collections", indexesPerCol), + http.StatusOK, + ) + } else { + col, err := db.GetCollectionByName(req.Context(), collectionParam) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + indexes, err := col.GetIndexes(req.Context()) + if err != nil { + handleErr(req.Context(), rw, err, http.StatusInternalServerError) + return + } + sendJSON( + req.Context(), + rw, + simpleDataResponse("indexes", indexes), + http.StatusOK, + ) + } +} diff --git a/api/http/handlerfuncs_index_test.go b/api/http/handlerfuncs_index_test.go new file mode 100644 index 
0000000000..3e82249ef8 --- /dev/null +++ b/api/http/handlerfuncs_index_test.go @@ -0,0 +1,239 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/mocks" + "github.com/sourcenetwork/defradb/errors" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func addDBToContext(t *testing.T, req *http.Request, db *mocks.DB) *http.Request { + if db == nil { + db = mocks.NewDB(t) + } + ctx := context.WithValue(req.Context(), ctxDB{}, db) + return req.WithContext(ctx) +} + +func TestCreateIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(createIndexHandler) + assert.HTTPBodyContains(t, handler, "POST", IndexPath, nil, "no database available") +} + +func TestCreateIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte("invalid map"))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, nil) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") +} + +func TestCreateIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestCreateIndexHandler_IfFailsToCreateIndex_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().CreateIndex(mock.Anything, mock.Anything). 
+ Return(client.IndexDescription{}, testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(createIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestDropIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(dropIndexHandler) + assert.HTTPBodyContains(t, handler, "DELETE", IndexPath, nil, "no database available") +} + +func TestDropIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte("invalid map"))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, nil) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") +} + +func TestDropIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestDropIndexHandler_IfFailsToDropIndex_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().DropIndex(mock.Anything, mock.Anything).Return(testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(dropIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { + handler := http.HandlerFunc(listIndexHandler) + assert.HTTPBodyContains(t, handler, "GET", IndexPath, nil, "no database available") +} + +func TestListIndexHandler_IfFailsToGetAllIndexes_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetAllIndexes(mock.Anything).Return(nil, testError) + + req, err := http.NewRequest("GET", IndexPath, bytes.NewBuffer([]byte(`{}`))) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, 
rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { + testError := errors.New("test error") + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) + + u, _ := url.Parse("http://defradb.com" + IndexPath) + params := url.Values{} + params.Add("collection", "testCollection") + u.RawQuery = params.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + t.Fatal(err) + } + + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} + +func TestListIndexHandler_IfFailsToCollectionGetIndexes_ReturnError(t *testing.T) { + testError := errors.New("test error") + col := mocks.NewCollection(t) + col.EXPECT().GetIndexes(mock.Anything).Return(nil, testError) + + db := mocks.NewDB(t) + db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) + + u, _ := url.Parse("http://defradb.com" + IndexPath) + params := url.Values{} + params.Add("collection", "testCollection") + u.RawQuery = params.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + t.Fatal(err) + } + req = addDBToContext(t, req, db) + + rr := httptest.NewRecorder() + handler := http.HandlerFunc(listIndexHandler) + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") + assert.Contains(t, rr.Body.String(), testError.Error()) +} diff --git a/api/http/handlerfuncs_test.go b/api/http/handlerfuncs_test.go index b21526efc0..bb7bb71aad 100644 --- a/api/http/handlerfuncs_test.go +++ b/api/http/handlerfuncs_test.go @@ -27,6 +27,7 @@ import ( "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" @@ -42,6 +43,7 @@ type testOptions struct { Path string Body io.Reader Headers map[string]string + QueryParams map[string]string ExpectedStatus int ResponseData any ServerOptions serverOptions @@ -69,7 +71,7 @@ func TestRootHandler(t *testing.T) { }) switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. Read the documentation at https://docs.source.network/.", v["response"]) + require.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. 
Read the documentation at https://docs.source.network/.", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -89,7 +91,7 @@ func TestPingHandler(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "pong", v["response"]) + require.Equal(t, "pong", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -113,7 +115,7 @@ func TestDumpHandlerWithNoError(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "ok", v["response"]) + require.Equal(t, "ok", v["response"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -132,10 +134,10 @@ func TestDumpHandlerWithDBError(t *testing.T) { ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestExecGQLWithNilBody(t *testing.T) { @@ -152,10 +154,10 @@ func TestExecGQLWithNilBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) } func TestExecGQLWithEmptyBody(t *testing.T) { @@ -172,10 +174,10 @@ func TestExecGQLWithEmptyBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) } type mockReadCloser struct { @@ -205,10 +207,10 @@ func TestExecGQLWithMockBody(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "error reading", errResponse.Errors[0].Message) + 
require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "error reading", errResponse.Errors[0].Message) } func TestExecGQLWithInvalidContentType(t *testing.T) { @@ -217,7 +219,7 @@ func TestExecGQLWithInvalidContentType(t *testing.T) { errResponse := ErrorResponse{} stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -233,10 +235,10 @@ mutation { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) } func TestExecGQLWithNoDB(t *testing.T) { @@ -245,7 +247,7 @@ func TestExecGQLWithNoDB(t *testing.T) { errResponse := ErrorResponse{} stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -260,10 +262,10 @@ mutation { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { @@ -273,7 +275,7 @@ func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { stmt := ` [ "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -297,10 +299,10 @@ func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", 
errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeJSON(t *testing.T) { @@ -315,7 +317,7 @@ func TestExecGQLHandlerContentTypeJSON(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -344,7 +346,7 @@ func TestExecGQLHandlerContentTypeJSON(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { @@ -359,7 +361,7 @@ func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"notAField\\\": true @@ -384,8 +386,8 @@ func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, resp.Errors, "The given field does not exist. Name: notAField") - assert.Len(t, resp.Errors, 1) + require.Contains(t, resp.Errors, "The given field does not exist. Name: notAField") + require.Len(t, resp.Errors, 1) } func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { @@ -400,7 +402,7 @@ func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { stmt := ` { "query": "mutation { - create_user( + create_User( data: \"{ \\\"age\\\": 31, \\\"verified\\\": true, @@ -429,7 +431,7 @@ func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) { @@ -447,10 +449,10 @@ func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message) } func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) { @@ -464,7 +466,7 @@ func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -485,7 +487,7 @@ mutation { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerContentTypeText(t *testing.T) { @@ -499,7 +501,7 @@ func TestExecGQLHandlerContentTypeText(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -519,7 
+521,7 @@ mutation { ResponseData: &resp, }) - assert.Contains(t, users[0].Key, "bae-") + require.Contains(t, users[0].Key, "bae-") } func TestExecGQLHandlerWithSubsctiption(t *testing.T) { @@ -532,7 +534,7 @@ func TestExecGQLHandlerWithSubsctiption(t *testing.T) { stmt := ` subscription { - user { + User { _key age name @@ -565,7 +567,7 @@ subscription { // add document stmt2 := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -586,12 +588,144 @@ mutation { }) select { case data := <-ch: - assert.Contains(t, string(data), users[0].Key) + require.Contains(t, string(data), users[0].Key) case err := <-errCh: t.Fatal(err) } } +func TestListSchemaHandlerWithoutDB(t *testing.T) { + t.Cleanup(CleanupEnv) + env = "dev" + + errResponse := ErrorResponse{} + testRequest(testOptions{ + Testing: t, + DB: nil, + Method: "GET", + Path: SchemaPath, + ExpectedStatus: 500, + ResponseData: &errResponse, + }) + + assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + assert.Equal(t, "no database available", errResponse.Errors[0].Message) +} + +func TestListSchemaHandlerWitNoError(t *testing.T) { + ctx := context.Background() + defra := testNewInMemoryDB(t, ctx) + defer defra.Close(ctx) + + stmt := ` +type user { + name: String + age: Int + verified: Boolean + points: Float +} +type group { + owner: user + members: [user] +}` + + _, err := defra.AddSchema(ctx, stmt) + if err != nil { + t.Fatal(err) + } + + resp := DataResponse{} + testRequest(testOptions{ + Testing: t, + DB: defra, + Method: "GET", + Path: SchemaPath, + ExpectedStatus: 200, + ResponseData: &resp, + }) + + switch v := resp.Data.(type) { + case map[string]any: + assert.Equal(t, map[string]any{ + "collections": []any{ + map[string]any{ + "name": "group", + "id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", + "version_id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", + "fields": []any{ + map[string]any{ + "id": "0", + "kind": "ID", + "name": "_key", + "internal": true, + }, + map[string]any{ + "id": "1", + "kind": "[user]", + "name": "members", + "internal": false, + }, + map[string]any{ + "id": "2", + "kind": "user", + "name": "owner", + "internal": false, + }, + map[string]any{ + "id": "3", + "kind": "ID", + "name": "owner_id", + "internal": true, + }, + }, + }, + map[string]any{ + "name": "user", + "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "version_id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "fields": []any{ + map[string]any{ + "id": "0", + "kind": "ID", + "name": "_key", + "internal": true, + }, + map[string]any{ + "id": "1", + "kind": "Int", + "name": "age", + "internal": false, + }, + map[string]any{ + "id": "2", + "kind": "String", + "name": "name", + "internal": false, + }, + map[string]any{ + "id": "3", + "kind": "Float", + "name": "points", + "internal": false, + }, + map[string]any{ + "id": "4", + "kind": "Boolean", + "name": "verified", + "internal": false, + }, + }, + }, + }, + }, v) + + default: + t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) + } +} + func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { t.Cleanup(CleanupEnv) env = "dev" 
@@ -604,23 +738,23 @@ func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { Testing: t, DB: nil, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: &mockReadCloser, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "error reading", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "error reading", errResponse.Errors[0].Message) } func TestLoadSchemaHandlerWithoutDB(t *testing.T) { t.Cleanup(CleanupEnv) env = "dev" stmt := ` -type user { +type User { name: String age: Int verified: Boolean @@ -634,16 +768,16 @@ type user { Testing: t, DB: nil, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { @@ -655,7 +789,7 @@ func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { // statement with types instead of type stmt := ` -types user { +types User { name: String age: Int verified: Boolean @@ -669,18 +803,18 @@ types user { Testing: t, DB: defra, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 500, ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal( + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal( t, - "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types user {\n ^\n3: \\u0009name: String\n", + "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types User {\n ^\n3: \\u0009name: String\n", errResponse.Errors[0].Message, ) } @@ -691,7 +825,7 @@ func TestLoadSchemaHandlerWitNoError(t *testing.T) { defer defra.Close(ctx) stmt := ` -type user { +type User { name: String age: Int verified: Boolean @@ -705,7 +839,7 @@ type user { Testing: t, DB: defra, Method: "POST", - Path: SchemaLoadPath, + Path: SchemaPath, Body: buf, ExpectedStatus: 200, ResponseData: 
&resp, @@ -713,12 +847,13 @@ type user { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, map[string]any{ + require.Equal(t, map[string]any{ "result": "success", "collections": []any{ map[string]any{ - "name": "user", - "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "name": "User", + "id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", + "version_id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", }, }, }, v) @@ -742,10 +877,10 @@ func TestGetBlockHandlerWithMultihashError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") + require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { @@ -768,10 +903,10 @@ func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithNoDB(t *testing.T) { @@ -788,10 +923,10 @@ func TestGetBlockHandlerWithNoDB(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no database available", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { @@ -812,10 +947,10 @@ func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "ipld: could not find 
bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") + require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) } func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { @@ -828,7 +963,7 @@ func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { // add document stmt := ` mutation { - create_user(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { + create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { _key } }` @@ -856,7 +991,7 @@ mutation { // get document cid stmt2 := ` query { - user (dockey: "%s") { + User (dockey: "%s") { _version { cid } @@ -898,7 +1033,7 @@ query { case map[string]any: switch val := d["val"].(type) { case string: - assert.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) + require.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) default: t.Fatalf("expecting string but got %T", val) } @@ -924,7 +1059,7 @@ func TestPeerIDHandler(t *testing.T) { switch v := resp.Data.(type) { case map[string]any: - assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) + require.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) default: t.Fatalf("data should be of type map[string]any but got %T", resp.Data) } @@ -945,13 +1080,13 @@ func TestPeerIDHandlerWithNoPeerIDInContext(t *testing.T) { ResponseData: &errResponse, }) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") - assert.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no PeerID available. P2P might be disabled", errResponse.Errors[0].Message) + require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") + require.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) + require.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) + require.Equal(t, "no PeerID available. 
P2P might be disabled", errResponse.Errors[0].Message) } -func testRequest(opt testOptions) { +func testRequest(opt testOptions) []byte { req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) if err != nil { opt.Testing.Fatal(err) @@ -961,20 +1096,30 @@ func testRequest(opt testOptions) { req.Header.Set(k, v) } + q := req.URL.Query() + for k, v := range opt.QueryParams { + q.Add(k, v) + } + req.URL.RawQuery = q.Encode() + h := newHandler(opt.DB, opt.ServerOptions) rec := httptest.NewRecorder() h.ServeHTTP(rec, req) assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - respBody, err := io.ReadAll(rec.Result().Body) + resBody, err := io.ReadAll(rec.Result().Body) if err != nil { opt.Testing.Fatal(err) } - err = json.Unmarshal(respBody, &opt.ResponseData) - if err != nil { - opt.Testing.Fatal(err) + if opt.ResponseData != nil { + err = json.Unmarshal(resBody, &opt.ResponseData) + if err != nil { + opt.Testing.Fatal(err) + } } + + return resBody } func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byte, errCh chan error) { @@ -993,7 +1138,7 @@ func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byt h := newHandler(opt.DB, opt.ServerOptions) rec := httptest.NewRecorder() h.ServeHTTP(rec, req) - assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) + require.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) respBody, err := io.ReadAll(rec.Result().Body) if err != nil { @@ -1026,7 +1171,7 @@ func testNewInMemoryDB(t *testing.T, ctx context.Context) client.DB { func testLoadSchema(t *testing.T, ctx context.Context, db client.DB) { stmt := ` -type user { +type User { name: String age: Int verified: Boolean diff --git a/api/http/logger_test.go b/api/http/logger_test.go index 75367eba29..9c2791d9df 100644 --- a/api/http/logger_test.go +++ b/api/http/logger_test.go @@ -79,12 +79,11 @@ func TestLoggerKeyValueOutput(t *testing.T) { rec2 := httptest.NewRecorder() - h := newHandler(nil, serverOptions{}) log.ApplyConfig(logging.Config{ EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), OutputPaths: []string{logFile}, }) - loggerMiddleware(h.handle(pingHandler)).ServeHTTP(rec2, req) + loggerMiddleware(http.HandlerFunc(pingHandler)).ServeHTTP(rec2, req) assert.Equal(t, 200, rec2.Result().StatusCode) // inspect the log file diff --git a/api/http/playground.go b/api/http/playground.go new file mode 100644 index 0000000000..0a69e312b2 --- /dev/null +++ b/api/http/playground.go @@ -0,0 +1,28 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +//go:build playground + +package http + +import ( + "io/fs" + "net/http" + + "github.com/sourcenetwork/defradb/playground" +) + +func init() { + sub, err := fs.Sub(playground.Dist, "dist") + if err != nil { + panic(err) + } + playgroundHandler = http.FileServer(http.FS(sub)) +} diff --git a/api/http/router.go b/api/http/router.go index ad1111ad71..2d54a16560 100644 --- a/api/http/router.go +++ b/api/http/router.go @@ -11,12 +11,11 @@ package http import ( + "net/http" "net/url" "path" "strings" - "github.com/go-chi/chi/v5" - "github.com/go-chi/cors" "github.com/pkg/errors" ) @@ -25,42 +24,42 @@ const ( Version string = "v0" versionedAPIPath string = "/api/" + Version - RootPath string = versionedAPIPath + "" - PingPath string = versionedAPIPath + "/ping" - DumpPath string = versionedAPIPath + "/debug/dump" - BlocksPath string = versionedAPIPath + "/blocks" - GraphQLPath string = versionedAPIPath + "/graphql" - SchemaLoadPath string = versionedAPIPath + "/schema/load" - SchemaPatchPath string = versionedAPIPath + "/schema/patch" - PeerIDPath string = versionedAPIPath + "/peerid" + RootPath string = versionedAPIPath + "" + PingPath string = versionedAPIPath + "/ping" + DumpPath string = versionedAPIPath + "/debug/dump" + BlocksPath string = versionedAPIPath + "/blocks" + GraphQLPath string = versionedAPIPath + "/graphql" + SchemaPath string = versionedAPIPath + "/schema" + SchemaMigrationPath string = SchemaPath + "/migration" + IndexPath string = versionedAPIPath + "/index" + PeerIDPath string = versionedAPIPath + "/peerid" + BackupPath string = versionedAPIPath + "/backup" + ExportPath string = BackupPath + "/export" + ImportPath string = BackupPath + "/import" ) -func setRoutes(h *handler) *handler { - h.Mux = chi.NewRouter() - - // setup CORS - if len(h.options.allowedOrigins) != 0 { - h.Use(cors.Handler(cors.Options{ - AllowedOrigins: h.options.allowedOrigins, - AllowedMethods: []string{"GET", "POST", "OPTIONS"}, - AllowedHeaders: []string{"Content-Type"}, - MaxAge: 300, - })) - } +// playgroundHandler is set when building with the playground build tag +var playgroundHandler http.Handler - // setup logger middleware - h.Use(loggerMiddleware) - - // define routes - h.Get(RootPath, h.handle(rootHandler)) - h.Get(PingPath, h.handle(pingHandler)) - h.Get(DumpPath, h.handle(dumpHandler)) - h.Get(BlocksPath+"/{cid}", h.handle(getBlockHandler)) - h.Get(GraphQLPath, h.handle(execGQLHandler)) - h.Post(GraphQLPath, h.handle(execGQLHandler)) - h.Post(SchemaLoadPath, h.handle(loadSchemaHandler)) - h.Post(SchemaPatchPath, h.handle(patchSchemaHandler)) - h.Get(PeerIDPath, h.handle(peerIDHandler)) +func setRoutes(h *handler) *handler { + h.Get(RootPath, rootHandler) + h.Get(PingPath, pingHandler) + h.Get(DumpPath, dumpHandler) + h.Get(BlocksPath+"/{cid}", getBlockHandler) + h.Get(GraphQLPath, execGQLHandler) + h.Post(GraphQLPath, execGQLHandler) + h.Get(SchemaPath, listSchemaHandler) + h.Post(SchemaPath, loadSchemaHandler) + h.Patch(SchemaPath, patchSchemaHandler) + h.Post(SchemaMigrationPath, setMigrationHandler) + h.Get(SchemaMigrationPath, getMigrationHandler) + h.Post(IndexPath, createIndexHandler) + h.Delete(IndexPath, dropIndexHandler) + h.Get(IndexPath, listIndexHandler) + h.Get(PeerIDPath, peerIDHandler) + h.Post(ExportPath, exportHandler) + h.Post(ImportPath, importHandler) + h.Handle("/*", playgroundHandler) return h } diff --git a/cli/backup.go b/cli/backup.go new file mode 100644 index 0000000000..15877fa7fb --- /dev/null +++ b/cli/backup.go @@ -0,0 +1,25 @@ +// Copyright 2022 
Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeBackupCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "backup", + Short: "Interact with the backup utility", + Long: `Export to or Import from a backup file. +Currently only supports JSON format.`, + } + return cmd +} diff --git a/cli/backup_export.go b/cli/backup_export.go new file mode 100644 index 0000000000..32184bfe35 --- /dev/null +++ b/cli/backup_export.go @@ -0,0 +1,142 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + "strings" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +const jsonFileType = "json" + +func MakeBackupExportCommand(cfg *config.Config) *cobra.Command { + var collections []string + var pretty bool + var format string + var cmd = &cobra.Command{ + Use: "export [-c --collections | -p --pretty | -f --format] ", + Short: "Export the database to a file", + Long: `Export the database to a file. If a file exists at the location, it will be overwritten. + +If the --collection flag is provided, only the data for that collection will be exported. +Otherwise, all collections in the database will be exported. + +If the --pretty flag is provided, the JSON will be pretty printed. 
+ +Example: export data for the 'Users' collection: + defradb client backup export --collections Users user_data.json`, + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.ExactArgs(1)(cmd, args); err != nil { + return NewErrInvalidArgumentLength(err, 1) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if !isValidExportFormat(format) { + return ErrInvalidExportFormat + } + outputPath := args[0] + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ExportPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + for i := range collections { + collections[i] = strings.Trim(collections[i], " ") + } + + data := client.BackupConfig{ + Filepath: outputPath, + Format: format, + Pretty: pretty, + Collections: collections, + } + + b, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type exportResponse struct { + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := exportResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to export data", + logging.NewKV("Errors", r.Errors)) + } else if len(collections) == 1 { + log.FeedbackInfo(cmd.Context(), "Data exported for collection "+collections[0]) + } else if len(collections) > 1 { + log.FeedbackInfo(cmd.Context(), "Data exported for collections "+strings.Join(collections, ", ")) + } else { + log.FeedbackInfo(cmd.Context(), "Data exported for all collections") + } + } + return nil + }, + } + cmd.Flags().BoolVarP(&pretty, "pretty", "p", false, "Set the output JSON to be pretty printed") + cmd.Flags().StringVarP(&format, "format", "f", jsonFileType, + "Define the output format. Supported formats: [json]") + cmd.Flags().StringSliceVarP(&collections, "collections", "c", []string{}, "List of collections") + + return cmd +} + +func isValidExportFormat(format string) bool { + switch strings.ToLower(format) { + case jsonFileType: + return true + default: + return false + } +} diff --git a/cli/backup_export_test.go b/cli/backup_export_test.go new file mode 100644 index 0000000000..9539a1cdb1 --- /dev/null +++ b/cli/backup_export_test.go @@ -0,0 +1,300 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
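A minimal usage sketch for the export command defined above, assuming a local node listening on the default API address; the collection name and output path are placeholders rather than values taken from this changeset:

    # Illustrative values only; --collections, --pretty, --format and the output path argument are defined above.
    defradb client backup export --collections User --pretty --format json backup.json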
+ +package cli + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBackupExportCmd_WithNoArgument_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.ValidateArgs([]string{}) + require.ErrorIs(t, err, ErrInvalidArgumentLength) +} + +func TestBackupExportCmd_WithInvalidExportFormat_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + dbExportCmd := MakeBackupExportCommand(cfg) + + filepath := t.TempDir() + "/test.json" + + dbExportCmd.Flags().Set("format", "invalid") + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.ErrorIs(t, err, ErrInvalidExportFormat) +} + +func TestBackupExportCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + + filepath := t.TempDir() + "/test.json" + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestBackupExportCmd_WithEmptyDatastore_NoError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Len(t, b, 2) // file should be an empty json object +} + +func TestBackupExportCmd_WithInvalidCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User") + err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to export data")) +} + +func TestBackupExportCmd_WithAllCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + string(b), + ) +} + +func 
TestBackupExportCmd_WithAllCollectionAndPrettyFormating_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("pretty", "true") + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{ + "User": [ + { + "_key": "bae-e933420a-988a-56f8-8952-6c245aebd519", + "_newKey": "bae-e933420a-988a-56f8-8952-6c245aebd519", + "age": 30, + "name": "John" + } + ] +}`, + string(b), + ) +} + +func TestBackupExportCmd_WithSingleCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User") + err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for collection User")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + + require.Equal( + t, + `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + string(b), + ) +} + +func TestBackupExportCmd_WithMultipleCollections_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + col1, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := di.db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc2) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbExportCmd := MakeBackupExportCommand(cfg) + dbExportCmd.Flags().Set("collections", "User, Address") + err = dbExportCmd.RunE(dbExportCmd, 
[]string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Data exported for collections User, Address")) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + + require.EqualValues(t, expectedMap, fileMap) +} diff --git a/cli/backup_import.go b/cli/backup_import.go new file mode 100644 index 0000000000..6802230aa0 --- /dev/null +++ b/cli/backup_import.go @@ -0,0 +1,98 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "import ", + Short: "Import a JSON data file to the database", + Long: `Import a JSON data file to the database. 
+ +Example: import data to the database: + defradb client backup import user_data.json`, + Args: func(cmd *cobra.Command, args []string) error { + if err := cobra.ExactArgs(1)(cmd, args); err != nil { + return NewErrInvalidArgumentLength(err, 1) + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ImportPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + inputPath := args[0] + data := map[string]string{ + "filepath": inputPath, + } + + b, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexCreateResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to import data", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully imported data from file", + logging.NewKV("File", inputPath)) + } + } + return nil + }, + } + return cmd +} diff --git a/cli/backup_import_test.go b/cli/backup_import_test.go new file mode 100644 index 0000000000..ce84c5c2c6 --- /dev/null +++ b/cli/backup_import_test.go @@ -0,0 +1,130 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
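A matching sketch for the import command, with placeholder values; the second line shows the equivalent raw request the command builds against the new ImportPath route, assuming the commonly used localhost:9181 API address:

    # Illustrative address and path; the JSON body mirrors the {"filepath": ...} map marshalled in RunE above.
    defradb client backup import backup.json
    curl -X POST "http://localhost:9181/api/v0/backup/import" -H "Content-Type: application/json" -d '{"filepath":"backup.json"}'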
+ +package cli + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBackupImportCmd_WithNoArgument_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + setTestingAddresses(cfg) + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.ValidateArgs([]string{}) + require.ErrorIs(t, err, ErrInvalidArgumentLength) +} + +func TestBackupImportCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + + filepath := t.TempDir() + "/test.json" + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestBackupImportCmd_WithNonExistantFile_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to import data")) +} + +func TestBackupImportCmd_WithEmptyDatastore_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + filepath := t.TempDir() + "/test.json" + + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Failed to import data")) +} + +func TestBackupImportCmd_WithExistingCollection_NoError(t *testing.T) { + ctx := context.Background() + + cfg, di, close := startTestNode(t) + defer close() + + _, err := di.db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + dbImportCmd := MakeBackupImportCommand(cfg) + err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, lineHas(logLines, "msg", "Successfully imported data from file")) + + col, err := di.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + key, err := client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") + require.NoError(t, err) + doc, err := col.Get(ctx, key, false) + require.NoError(t, err) + + val, err := doc.Get("name") + require.NoError(t, err) + + require.Equal(t, "John", val.(string)) +} diff --git a/cli/blocks_get.go b/cli/blocks_get.go index 4223745fc4..c3519f99e7 100644 --- a/cli/blocks_get.go +++ b/cli/blocks_get.go @@ -24,7 +24,7 @@ import ( func MakeBlocksGetCommand(cfg *config.Config) *cobra.Command { var cmd = 
&cobra.Command{ Use: "get [CID]", - Short: "Get a block by its CID from the blockstore.", + Short: "Get a block by its CID from the blockstore", RunE: func(cmd *cobra.Command, args []string) (err error) { if len(args) != 1 { return NewErrMissingArg("CID") diff --git a/cli/cli.go b/cli/cli.go index 615ed1c208..707adbab7c 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -57,7 +57,10 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { rpcCmd := MakeRPCCommand(cfg) blocksCmd := MakeBlocksCommand() schemaCmd := MakeSchemaCommand() + schemaMigrationCmd := MakeSchemaMigrationCommand() + indexCmd := MakeIndexCommand() clientCmd := MakeClientCommand() + backupCmd := MakeBackupCommand() rpcReplicatorCmd := MakeReplicatorCommand() p2pCollectionCmd := MakeP2PCollectionCommand() p2pCollectionCmd.AddCommand( @@ -77,9 +80,24 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { blocksCmd.AddCommand( MakeBlocksGetCommand(cfg), ) + schemaMigrationCmd.AddCommand( + MakeSchemaMigrationSetCommand(cfg), + MakeSchemaMigrationGetCommand(cfg), + ) schemaCmd.AddCommand( MakeSchemaAddCommand(cfg), + MakeSchemaListCommand(cfg), MakeSchemaPatchCommand(cfg), + schemaMigrationCmd, + ) + indexCmd.AddCommand( + MakeIndexCreateCommand(cfg), + MakeIndexDropCommand(cfg), + MakeIndexListCommand(cfg), + ) + backupCmd.AddCommand( + MakeBackupExportCommand(cfg), + MakeBackupImportCommand(cfg), ) clientCmd.AddCommand( MakeDumpCommand(cfg), @@ -87,8 +105,10 @@ func NewDefraCommand(cfg *config.Config) DefraCommand { MakeRequestCommand(cfg), MakePeerIDCommand(cfg), schemaCmd, + indexCmd, rpcCmd, blocksCmd, + backupCmd, ) rootCmd.AddCommand( clientCmd, diff --git a/cli/client.go b/cli/client.go index 1e6ba43ae5..2456df8d43 100644 --- a/cli/client.go +++ b/cli/client.go @@ -17,9 +17,9 @@ import ( func MakeClientCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "client", - Short: "Interact with a running DefraDB node as a client", - Long: `Interact with a running DefraDB node as a client. -Execute queries, add schema types, and run debug routines.`, + Short: "Interact with a DefraDB node", + Long: `Interact with a DefraDB node. 
+Execute queries, add schema types, obtain node info, etc.`, } return cmd diff --git a/cli/dump.go b/cli/dump.go index a23d160e7e..f35e9232b1 100644 --- a/cli/dump.go +++ b/cli/dump.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,7 @@ import ( func MakeDumpCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "dump", - Short: "Dump the contents of a database node-side", + Short: "Dump the contents of DefraDB node-side", RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +48,7 @@ func MakeDumpCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/errors.go b/cli/errors.go index be0b75ff5f..17e4819a8b 100644 --- a/cli/errors.go +++ b/cli/errors.go @@ -10,7 +10,11 @@ package cli -import "github.com/sourcenetwork/defradb/errors" +import ( + "strings" + + "github.com/sourcenetwork/defradb/errors" +) const ( errMissingArg string = "missing argument" @@ -25,10 +29,14 @@ const ( errFailedToJoinEndpoint string = "failed to join endpoint" errFailedToSendRequest string = "failed to send request" errFailedToReadResponseBody string = "failed to read response body" + errFailedToCloseResponseBody string = "failed to close response body" errFailedToStatStdOut string = "failed to stat stdout" errFailedToHandleGQLErrors string = "failed to handle GraphQL errors" errFailedToPrettyPrintResponse string = "failed to pretty print response" errFailedToUnmarshalResponse string = "failed to unmarshal response" + errFailedParsePeerID string = "failed to parse PeerID" + errFailedToMarshalData string = "failed to marshal data" + errInvalidArgumentLength string = "invalid argument length" ) // Errors returnable from this package. 
@@ -43,7 +51,7 @@ var ( ErrEmptyStdin = errors.New(errEmptyStdin) ErrFailedToReadFile = errors.New(errFailedToReadFile) ErrFailedToReadStdin = errors.New(errFailedToReadStdin) - ErrFailToWrapRPCClient = errors.New(errFailedToCreateRPCClient) + ErrFailedToCreateRPCClient = errors.New(errFailedToCreateRPCClient) ErrFailedToAddReplicator = errors.New(errFailedToAddReplicator) ErrFailedToJoinEndpoint = errors.New(errFailedToJoinEndpoint) ErrFailedToSendRequest = errors.New(errFailedToSendRequest) @@ -52,14 +60,21 @@ var ( ErrFailedToHandleGQLErrors = errors.New(errFailedToHandleGQLErrors) ErrFailedToPrettyPrintResponse = errors.New(errFailedToPrettyPrintResponse) ErrFailedToUnmarshalResponse = errors.New(errFailedToUnmarshalResponse) + ErrFailedParsePeerID = errors.New(errFailedParsePeerID) + ErrInvalidExportFormat = errors.New("invalid export format") + ErrInvalidArgumentLength = errors.New(errInvalidArgumentLength) ) func NewErrMissingArg(name string) error { return errors.New(errMissingArg, errors.NewKV("Name", name)) } -func NewErrMissingArgs(count int, provided int) error { - return errors.New(errMissingArgs, errors.NewKV("Required", count), errors.NewKV("Provided", provided)) +func NewErrMissingArgs(names []string) error { + return errors.New(errMissingArgs, errors.NewKV("Required", strings.Join(names, ", "))) +} + +func NewErrTooManyArgs(max, actual int) error { + return errors.New(errTooManyArgs, errors.NewKV("Max", max), errors.NewKV("Actual", actual)) } func NewFailedToReadFile(inner error) error { @@ -90,6 +105,13 @@ func NewErrFailedToReadResponseBody(inner error) error { return errors.Wrap(errFailedToReadResponseBody, inner) } +func NewErrFailedToCloseResponseBody(closeErr, other error) error { + if other != nil { + return errors.Wrap(errFailedToCloseResponseBody, closeErr, errors.NewKV("Other error", other)) + } + return errors.Wrap(errFailedToCloseResponseBody, closeErr) +} + func NewErrFailedToStatStdOut(inner error) error { return errors.Wrap(errFailedToStatStdOut, inner) } @@ -105,3 +127,17 @@ func NewErrFailedToPrettyPrintResponse(inner error) error { func NewErrFailedToUnmarshalResponse(inner error) error { return errors.Wrap(errFailedToUnmarshalResponse, inner) } + +func NewErrFailedParsePeerID(inner error) error { + return errors.Wrap(errFailedParsePeerID, inner) +} + +// NewFailedToMarshalData returns an error indicating that a there was a problem with mashalling. +func NewFailedToMarshalData(inner error) error { + return errors.Wrap(errFailedToMarshalData, inner) +} + +// NewErrInvalidArgumentLength returns an error indicating an incorrect number of arguments. +func NewErrInvalidArgumentLength(inner error, expected int) error { + return errors.Wrap(errInvalidArgumentLength, inner, errors.NewKV("Expected", expected)) +} diff --git a/cli/index.go b/cli/index.go new file mode 100644 index 0000000000..a7343ddce2 --- /dev/null +++ b/cli/index.go @@ -0,0 +1,25 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeIndexCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "index", + Short: "Manage collections' indexes of a running DefraDB instance", + Long: `Manage (create, drop, or list) collection indexes on a DefraDB node.`, + } + + return cmd +} diff --git a/cli/index_create.go b/cli/index_create.go new file mode 100644 index 0000000000..a91a76d2d0 --- /dev/null +++ b/cli/index_create.go @@ -0,0 +1,125 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexCreateResponse struct { + Data struct { + Index client.IndexDescription `json:"index"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexCreateCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var nameArg string + var fieldsArg string + var cmd = &cobra.Command{ + Use: "create -c --collection --fields [-n --name ]", + Short: "Creates a secondary index on a collection's field(s)", + Long: `Creates a secondary index on a collection's field(s). + +The --name flag is optional. If not provided, a name will be generated automatically. 
+ +Example: create an index for 'Users' collection on 'name' field: + defradb client index create --collection Users --fields name + +Example: create a named index for 'Users' collection on 'name' field: + defradb client index create --collection Users --fields name --name UsersByName`, + ValidArgs: []string{"collection", "fields", "name"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if collectionArg == "" || fieldsArg == "" { + if collectionArg == "" { + return NewErrMissingArg("collection") + } else { + return NewErrMissingArg("fields") + } + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + data := map[string]string{ + "collection": collectionArg, + "fields": fieldsArg, + } + if nameArg != "" { + data["name"] = nameArg + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(jsonData)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexCreateResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to create index", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully created index", + logging.NewKV("Index", r.Data.Index)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") + cmd.Flags().StringVar(&fieldsArg, "fields", "", "Fields to index") + + return cmd +} diff --git a/cli/index_create_test.go b/cli/index_create_test.go new file mode 100644 index 0000000000..7032abbb2f --- /dev/null +++ b/cli/index_create_test.go @@ -0,0 +1,244 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
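To make the wire format of the create command concrete, a hedged curl equivalent; the address and field values are placeholders, while the JSON keys mirror the data map marshalled in RunE above:

    # Illustrative address and values; POSTs to the new /api/v0/index route.
    curl -X POST "http://localhost:9181/api/v0/index" -H "Content-Type: application/json" -d '{"collection":"Users","fields":"name","name":"UsersByName"}'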
+ +package cli + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" + +func getTestConfig(t *testing.T) *config.Config { + cfg := config.DefaultConfig() + dir := t.TempDir() + cfg.Datastore.Store = "memory" + cfg.Datastore.Badger.Path = dir + cfg.Net.P2PDisabled = false + cfg.Net.P2PAddress = randomMultiaddr + cfg.Net.RPCAddress = "0.0.0.0:0" + cfg.Net.TCPAddress = randomMultiaddr + return cfg +} + +func startTestNode(t *testing.T) (*config.Config, *defraInstance, func()) { + cfg := getTestConfig(t) + setTestingAddresses(cfg) + + ctx := context.Background() + di, err := start(ctx, cfg) + require.NoError(t, err) + return cfg, di, func() { di.close(ctx) } +} + +func parseLines(r io.Reader) ([]map[string]any, error) { + fileScanner := bufio.NewScanner(r) + + fileScanner.Split(bufio.ScanLines) + + logLines := []map[string]any{} + for fileScanner.Scan() { + loggedLine := make(map[string]any) + err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) + if err != nil { + return nil, err + } + logLines = append(logLines, loggedLine) + } + + return logLines, nil +} + +func lineHas(lines []map[string]any, key, value string) bool { + for _, line := range lines { + if line[key] == value { + return true + } + } + return false +} + +func simulateConsoleOutput(t *testing.T) (*bytes.Buffer, func()) { + b := &bytes.Buffer{} + log.ApplyConfig(logging.Config{ + EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), + Pipe: b, + }) + + f, err := os.CreateTemp(t.TempDir(), "tmpFile") + require.NoError(t, err) + originalStdout := os.Stdout + os.Stdout = f + + return b, func() { + os.Stdout = originalStdout + f.Close() + os.Remove(f.Name()) + } +} + +func execAddSchemaCmd(t *testing.T, cfg *config.Config, schema string) { + addSchemaCmd := MakeSchemaAddCommand(cfg) + err := addSchemaCmd.RunE(addSchemaCmd, []string{schema}) + require.NoError(t, err) +} + +func execCreateIndexCmd(t *testing.T, cfg *config.Config, collection, fields, name string) { + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", collection, + "--fields", fields, + "--name", name, + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) +} + +func hasLogWithKey(logLines []map[string]any, key string) bool { + for _, logLine := range logLines { + if _, ok := logLine[key]; ok { + return true + } + } + return false +} + +func TestIndexCreateCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexCreateCmd := MakeIndexCreateCommand(cfg) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexCreateCmd_IfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexCreateCmd := MakeIndexCreateCommand(cfg) + + outputBuf := bytes.NewBufferString("") + indexCreateCmd.SetOut(outputBuf) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = 
json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasErrors := r["errors"] + assert.True(t, hasErrors, "command should return error") +} + +func TestIndexCreateCmd_IfNoErrors_ReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + + indexCreateCmd := MakeIndexCreateCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexCreateCmd.SetOut(outputBuf) + + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "name", + "--name", "users_name_index", + }) + err := indexCreateCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexCreateCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexCreateCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + assert.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexCreateCmd_WithConsoleOutputIfNoErrors_ReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + + const indexName = "users_name_index" + indexCreateCmd := MakeIndexCreateCommand(cfg) + indexCreateCmd.SetArgs([]string{ + "--collection", "User", + "--fields", "name", + "--name", indexName, + }) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexCreateCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + result, ok := logLines[0]["Index"].(map[string]any) + require.True(t, ok) + assert.Equal(t, indexName, result["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/index_drop.go b/cli/index_drop.go new file mode 100644 index 0000000000..ef0a37db0c --- /dev/null +++ b/cli/index_drop.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexDropResponse struct { + Data struct { + Result string `json:"result"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var nameArg string + var cmd = &cobra.Command{ + Use: "drop -c --collection -n --name ", + Short: "Drop a collection's secondary index", + Long: `Drop a collection's secondary index. 
+ +Example: drop the index 'UsersByName' for 'Users' collection: + defradb client index drop --collection Users --name UsersByName`, + ValidArgs: []string{"collection", "name"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if collectionArg == "" || nameArg == "" { + if collectionArg == "" { + return NewErrMissingArg("collection") + } else { + return NewErrMissingArg("name") + } + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + data := map[string]string{ + "collection": collectionArg, + "name": nameArg, + } + + jsonData, err := json.Marshal(data) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", endpoint.String(), bytes.NewBuffer(jsonData)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + req.Header.Add("Content-Type", "application/json") + client := &http.Client{} + res, err := client.Do(req) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexDropResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to drop index", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully dropped index", + logging.NewKV("Result", r.Data.Result)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") + + return cmd +} diff --git a/cli/index_drop_test.go b/cli/index_drop_test.go new file mode 100644 index 0000000000..7fa368a458 --- /dev/null +++ b/cli/index_drop_test.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
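The drop command above issues a DELETE against the same index route; a hedged curl equivalent with placeholder address and values:

    # Illustrative only; mirrors the DELETE request built in RunE above.
    curl -X DELETE "http://localhost:9181/api/v0/index" -H "Content-Type: application/json" -d '{"collection":"Users","name":"UsersByName"}'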
+ +package cli + +import ( + "bytes" + "encoding/json" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIndexDropCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexDropCmd := MakeIndexDropCommand(cfg) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexDropCmd_IfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexDropCmd := MakeIndexDropCommand(cfg) + + outputBuf := bytes.NewBufferString("") + indexDropCmd.SetOut(outputBuf) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasErrors := r["errors"] + assert.True(t, hasErrors, "command should return error") +} + +func TestIndexDropCmd_IfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexDropCmd := MakeIndexDropCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexDropCmd.SetOut(outputBuf) + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexDropCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + indexDropCmd := MakeIndexDropCommand(cfg) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + err := indexDropCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + assert.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexDropCmd_WithConsoleOutputIfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexDropCmd := MakeIndexDropCommand(cfg) + indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexDropCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + assert.Equal(t, "success", logLines[0]["Result"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/index_list.go b/cli/index_list.go new file mode 100644 index 0000000000..131782cfe5 --- /dev/null +++ b/cli/index_list.go @@ -0,0 +1,110 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/logging" +) + +type indexListResponse struct { + Data struct { + Collections map[string][]client.IndexDescription `json:"collections"` + Indexes []client.IndexDescription `json:"indexes"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeIndexListCommand(cfg *config.Config) *cobra.Command { + var collectionArg string + var cmd = &cobra.Command{ + Use: "list [-c --collection ]", + Short: "Shows the list of indexes in the database or for a specific collection", + Long: `Shows the list of indexes in the database or for a specific collection. + +If the --collection flag is provided, only the indexes for that collection will be shown. +Otherwise, all indexes in the database will be shown. + +Example: show all indexes for 'Users' collection: + defradb client index list --collection Users`, + ValidArgs: []string{"collection"}, + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + if collectionArg != "" { + values := url.Values{ + "collection": {collectionArg}, + } + endpoint.RawQuery = values.Encode() + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return NewErrFailedToSendRequest(err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return err + } + + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + r := indexListResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to list index", + logging.NewKV("Errors", r.Errors)) + } else if collectionArg != "" { + log.FeedbackInfo(cmd.Context(), "Fetched indexes for collection "+collectionArg, + logging.NewKV("Indexes", r.Data.Indexes)) + } else { + log.FeedbackInfo(cmd.Context(), "Fetched all indexes", + logging.NewKV("Collections", r.Data.Collections)) + } + } + return nil + }, + } + cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") + + return cmd +} diff --git a/cli/index_list_test.go b/cli/index_list_test.go new file mode 100644 index 0000000000..548d2af040 --- /dev/null +++ b/cli/index_list_test.go @@ -0,0 +1,145 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
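Listing indexes is a plain GET on the index route, optionally filtered by collection through a query parameter; a hedged curl equivalent with a placeholder address:

    # Illustrative address; omit the collection parameter to fetch indexes for all collections.
    curl "http://localhost:9181/api/v0/index?collection=Users"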
+ +package cli + +import ( + "bytes" + "encoding/json" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIndexListCmd_IfInvalidAddress_ReturnError(t *testing.T) { + cfg := getTestConfig(t) + cfg.API.Address = "invalid address" + indexCreateCmd := MakeIndexListCommand(cfg) + + err := indexCreateCmd.RunE(indexCreateCmd, nil) + require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) +} + +func TestIndexListCmd_IfNoErrors_ShouldReturnData(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") + + indexListCmd := MakeIndexListCommand(cfg) + outputBuf := bytes.NewBufferString("") + indexListCmd.SetOut(outputBuf) + + err := indexListCmd.Execute() + require.NoError(t, err) + + out, err := io.ReadAll(outputBuf) + require.NoError(t, err) + + r := make(map[string]any) + err = json.Unmarshal(out, &r) + require.NoError(t, err) + + _, hasData := r["data"] + assert.True(t, hasData, "command should return data") +} + +func TestIndexListCmd_WithConsoleOutputIfCollectionDoesNotExist_ReturnError(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + indexListCmd := MakeIndexListCommand(cfg) + indexListCmd.SetArgs([]string{"--collection", "User"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.True(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexListCmd_WithConsoleOutputIfCollectionIsGiven_ReturnCollectionList(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + const indexName = "users_name_index" + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execCreateIndexCmd(t, cfg, "User", "name", indexName) + + indexListCmd := MakeIndexListCommand(cfg) + indexListCmd.SetArgs([]string{"--collection", "User"}) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + resultList, ok := logLines[0]["Indexes"].([]any) + require.True(t, ok) + require.Len(t, resultList, 1) + result, ok := resultList[0].(map[string]any) + require.True(t, ok) + assert.Equal(t, indexName, result["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} + +func TestIndexListCmd_WithConsoleOutputIfNoArgs_ReturnAllIndexes(t *testing.T) { + cfg, _, close := startTestNode(t) + defer close() + + const userIndexName = "users_name_index" + const productIndexName = "product_price_index" + execAddSchemaCmd(t, cfg, `type User { name: String }`) + execAddSchemaCmd(t, cfg, `type Product { price: Int }`) + execCreateIndexCmd(t, cfg, "User", "name", userIndexName) + execCreateIndexCmd(t, cfg, "Product", "price", productIndexName) + + indexListCmd := MakeIndexListCommand(cfg) + + outputBuf, revertOutput := simulateConsoleOutput(t) + defer revertOutput() + + err := indexListCmd.Execute() + require.NoError(t, err) + + logLines, err := parseLines(outputBuf) + require.NoError(t, err) + require.Len(t, logLines, 1) + resultCollections, ok := logLines[0]["Collections"].(map[string]any) + require.True(t, ok) + + userCollection, ok := resultCollections["User"].([]any) + require.True(t, ok) + require.Len(t, userCollection, 1) + userIndex, ok := 
userCollection[0].(map[string]any) + require.True(t, ok) + require.Equal(t, userIndexName, userIndex["Name"]) + + productCollection, ok := resultCollections["Product"].([]any) + require.True(t, ok) + require.Len(t, productCollection, 1) + productIndex, ok := productCollection[0].(map[string]any) + require.True(t, ok) + require.Equal(t, productIndexName, productIndex["Name"]) + + assert.False(t, hasLogWithKey(logLines, "Errors")) +} diff --git a/cli/init.go b/cli/init.go index 9d188509bd..f9af1850b7 100644 --- a/cli/init.go +++ b/cli/init.go @@ -32,7 +32,8 @@ func MakeInitCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "init", Short: "Initialize DefraDB's root directory and configuration file", - Long: "Initialize a directory for configuration and data at the given path.", + Long: `Initialize a directory for configuration and data at the given path. +Passed flags will be persisted in the stored configuration.`, // Load a default configuration, considering env. variables and CLI flags. PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { if err := cfg.LoadWithRootdir(false); err != nil { diff --git a/cli/p2p_collection.go b/cli/p2p_collection.go index 143820d4d8..6ce6d8e7c7 100644 --- a/cli/p2p_collection.go +++ b/cli/p2p_collection.go @@ -17,8 +17,9 @@ import ( func MakeP2PCollectionCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "p2pcollection", - Short: "Interact with the P2P collection system", - Long: "Add, delete, or get the list of P2P collections", + Short: "Configure the P2P collection system", + Long: `Add, delete, or get the list of P2P collections. +The selected collections synchronize their events on the pubsub network.`, } return cmd } diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index d0fc18b6db..46a4f171e1 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -27,7 +27,8 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "add [collectionID]", Short: "Add P2P collections", - Long: `Use this command if you wish to add new P2P collections to the pubsub topics`, + Long: `Add P2P collections to the synchronized pubsub topics. +The collections are synchronized between nodes of a pubsub network.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { return errors.New("must specify at least one collectionID") @@ -38,7 +39,7 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,9 +52,9 @@ func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { err = client.AddP2PCollections(ctx, args...) 
if err != nil { - return errors.Wrap("failed to add p2p collections, request failed", err) + return errors.Wrap("failed to add P2P collections, request failed", err) } - log.FeedbackInfo(ctx, "Successfully added p2p collections", logging.NewKV("Collections", args)) + log.FeedbackInfo(ctx, "Successfully added P2P collections", logging.NewKV("Collections", args)) return nil }, } diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index 7e34339e0d..cb9c9f4025 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -27,7 +27,8 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all P2P collections", - Long: `Use this command if you wish to get all P2P collections in the pubsub topics`, + Long: `Get all P2P collections in the pubsub topics. +This is the list of collections of the node that are synchronized on the pubsub network.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.NoArgs(cmd, args); err != nil { return errors.New("must specify no argument") @@ -38,7 +39,7 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,7 +52,7 @@ func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { collections, err := client.GetAllP2PCollections(ctx) if err != nil { - return errors.Wrap("failed to add p2p collections, request failed", err) + return errors.Wrap("failed to add P2P collections, request failed", err) } if len(collections) > 0 { diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index ad79a86d1a..66dbd5fa16 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -26,8 +26,9 @@ import ( func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "remove [collectionID]", - Short: "Add P2P collections", - Long: `Use this command if you wish to remove P2P collections from the pubsub topics`, + Short: "Remove P2P collections", + Long: `Remove P2P collections from the followed pubsub topics. +The removed collections will no longer be synchronized between nodes.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { return errors.New("must specify at least one collectionID") @@ -38,7 +39,7 @@ func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -51,9 +52,9 @@ func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { err = client.RemoveP2PCollections(ctx, args...) 
if err != nil { - return errors.Wrap("failed to remove p2p collections, request failed", err) + return errors.Wrap("failed to remove P2P collections, request failed", err) } - log.FeedbackInfo(ctx, "Successfully removed p2p collections", logging.NewKV("Collections", args)) + log.FeedbackInfo(ctx, "Successfully removed P2P collections", logging.NewKV("Collections", args)) return nil }, } diff --git a/cli/peerid.go b/cli/peerid.go index 27559c2302..a3d269fb2d 100644 --- a/cli/peerid.go +++ b/cli/peerid.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,8 @@ import ( func MakePeerIDCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "peerid", - Short: "Get the PeerID of the DefraDB node", + Short: "Get the PeerID of the node", + Long: `Get the PeerID of the node.`, RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +49,7 @@ func MakePeerIDCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/ping.go b/cli/ping.go index 11ca129850..210847dfcc 100644 --- a/cli/ping.go +++ b/cli/ping.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -27,7 +26,7 @@ import ( func MakePingCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "ping", - Short: "Ping to test connection to a node", + Short: "Ping to test connection with a node", RunE: func(cmd *cobra.Command, _ []string) (err error) { stdout, err := os.Stdout.Stat() if err != nil { @@ -49,7 +48,7 @@ func MakePingCommand(cfg *config.Config) *cobra.Command { defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/replicator.go b/cli/replicator.go index fb6946ac29..c7956c80a6 100644 --- a/cli/replicator.go +++ b/cli/replicator.go @@ -17,8 +17,9 @@ import ( func MakeReplicatorCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "replicator", - Short: "Interact with the replicator system", - Long: "Add, delete, or get the list of persisted replicators", + Short: "Configure the replicator system", + Long: `Configure the replicator system. Add, delete, or get the list of persisted replicators. +A replicator replicates one or all collection(s) from one node to another.`, } return cmd } diff --git a/cli/replicator_delete.go b/cli/replicator_delete.go index 0bd6e0374b..eb7e580f12 100644 --- a/cli/replicator_delete.go +++ b/cli/replicator_delete.go @@ -31,9 +31,8 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { ) var cmd = &cobra.Command{ Use: "delete [-f, --full | -c, --collection] ", - Short: "Delete a replicator", - Long: `Use this command if you wish to remove the target replicator - for the p2p data sync system.`, + Short: "Delete a replicator. It will stop synchronizing", + Long: `Delete a replicator. 
It will stop synchronizing.`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.ExactArgs(1)(cmd, args); err != nil { return errors.New("must specify one argument: PeerID") @@ -50,7 +49,7 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() @@ -63,7 +62,7 @@ func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { pid, err := peer.Decode(pidString) if err != nil { - return errors.Wrap("failed to parse PeerID from string", err) + return NewErrFailedParsePeerID(err) } err = client.DeleteReplicator(ctx, pid) diff --git a/cli/replicator_getall.go b/cli/replicator_getall.go index 0c03b34e3a..63cd6533ba 100644 --- a/cli/replicator_getall.go +++ b/cli/replicator_getall.go @@ -27,7 +27,8 @@ func MakeReplicatorGetallCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all replicators", - Long: `Use this command if you wish to get all the replicators for the p2p data sync system.`, + Long: `Get all the replicators active in the P2P data sync system. +These are the replicators that are currently replicating data from one node to another.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 0 { if err := cmd.Usage(); err != nil { diff --git a/cli/replicator_set.go b/cli/replicator_set.go index 377761a9f2..acb70d0cfd 100644 --- a/cli/replicator_set.go +++ b/cli/replicator_set.go @@ -32,8 +32,9 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "set [-f, --full | -c, --collection] ", Short: "Set a P2P replicator", - Long: `Use this command if you wish to add a new target replicator - for the p2p data sync system or add schemas to an existing one`, + Long: `Add a new target replicator. +A replicator replicates one or all collection(s) from this node to another. 
+`, Args: func(cmd *cobra.Command, args []string) error { if err := cobra.ExactArgs(1)(cmd, args); err != nil { return errors.New("must specify one argument: peer") @@ -43,7 +44,7 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { peerAddr, err := ma.NewMultiaddr(args[0]) if err != nil { - return errors.Wrap("could not parse peer address", err) + return NewErrFailedParsePeerID(err) } if len(col) == 0 && !fullRep { return errors.New("must run with either --full or --collection") @@ -52,7 +53,7 @@ func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { cred := insecure.NewCredentials() client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) if err != nil { - return errors.Wrap("failed to create RPC client", err) + return ErrFailedToCreateRPCClient } rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() diff --git a/cli/request.go b/cli/request.go index b0c85a854b..1b8f86ced8 100644 --- a/cli/request.go +++ b/cli/request.go @@ -11,7 +11,6 @@ package cli import ( - "fmt" "io" "net/http" "net/url" @@ -109,7 +108,7 @@ To learn more about the DefraDB GraphQL Query Language, refer to https://docs.so defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/root.go b/cli/root.go index 941cc8b882..e639cde785 100644 --- a/cli/root.go +++ b/cli/root.go @@ -25,10 +25,7 @@ func MakeRootCommand(cfg *config.Config) *cobra.Command { Short: "DefraDB Edge Database", Long: `DefraDB is the edge database to power the user-centric future. -Start a database node, issue a request to a local or remote node, and much more. - -DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. -See https://docs.source.network/BSL.txt for more information. +Start a DefraDB node, interact with a local or remote node, and much more. `, // Runs on subcommands before their Run function, to handle configuration and top-level flags. // Loads the rootDir containing the configuration file, otherwise warn about it and load a default configuration. 
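A note on the error-handling refactor running through the CLI changes above (peerid, ping, request, and the schema commands below): the inline `errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err)` calls in the deferred `res.Body.Close()` handlers are replaced with a dedicated constructor, `NewErrFailedToCloseResponseBody(e, err)`. The sketch below shows the general shape of that pattern using only the Go standard library (it relies on errors.Join, available since Go 1.20); it is illustrative only, not DefraDB's actual helper, and the endpoint URL and message texts are assumptions.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
)

// fetchBody demonstrates the deferred-close pattern used by these commands:
// any error from closing the response body is attached to the error already
// being returned instead of being silently dropped.
func fetchBody(url string) (body []byte, err error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, fmt.Errorf("failed to send request: %w", err)
	}
	defer func() {
		if e := res.Body.Close(); e != nil {
			// Combine the close error with whatever error is already set.
			err = errors.Join(err, fmt.Errorf("failed to close response body: %w", e))
		}
	}()

	body, err = io.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body: %w", err)
	}
	return body, nil
}

func main() {
	// Placeholder address; any HTTP endpoint works for this sketch.
	if _, err := fetchBody("http://localhost:9181/api/v0/ping"); err != nil {
		fmt.Println(err)
	}
}

Using a named constructor keeps the close-failure message consistent across commands while still preserving the original error, which appears to be the intent of the `NewErrFailedToCloseResponseBody` change.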
diff --git a/cli/rpc.go b/cli/rpc.go index 02caa055fb..afb1a007e2 100644 --- a/cli/rpc.go +++ b/cli/rpc.go @@ -21,12 +21,12 @@ import ( func MakeRPCCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "rpc", - Short: "Interact with a DefraDB gRPC server", - Long: "Interact with a DefraDB gRPC server.", + Short: "Interact with a DefraDB node via RPC", + Long: "Interact with a DefraDB node via RPC.", } cmd.PersistentFlags().String( "addr", cfg.Net.RPCAddress, - "gRPC endpoint address", + "RPC endpoint address", ) if err := cfg.BindFlag("net.rpcaddress", cmd.PersistentFlags().Lookup("addr")); err != nil { diff --git a/cli/schema.go b/cli/schema.go index dc96539c71..9316768316 100644 --- a/cli/schema.go +++ b/cli/schema.go @@ -17,8 +17,8 @@ import ( func MakeSchemaCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "schema", - Short: "Interact with the schema system of a running DefraDB instance", - Long: `Make changes, updates, or look for existing schema types to a DefraDB node.`, + Short: "Interact with the schema system of a DefraDB node", + Long: `Make changes, updates, or look for existing schema types.`, } return cmd diff --git a/cli/schema_add.go b/cli/schema_add.go index 4fc916567f..b5f28f15d3 100644 --- a/cli/schema_add.go +++ b/cli/schema_add.go @@ -12,7 +12,6 @@ package cli import ( "encoding/json" - "fmt" "io" "net/http" "os" @@ -30,8 +29,8 @@ func MakeSchemaAddCommand(cfg *config.Config) *cobra.Command { var schemaFile string var cmd = &cobra.Command{ Use: "add [schema]", - Short: "Add a new schema type to DefraDB", - Long: `Add a new schema type to DefraDB. + Short: "Add new schema", + Long: `Add new schema. Example: add from an argument string: defradb client schema add 'type Foo { ... }' @@ -94,7 +93,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw return errors.New("empty schema provided") } - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaLoadPath) + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) if err != nil { return errors.Wrap("join paths failed", err) } @@ -106,7 +105,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw defer func() { if e := res.Body.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to read response body: %v", e.Error()), err) + err = NewErrFailedToCloseResponseBody(e, err) } }() diff --git a/cli/schema_list.go b/cli/schema_list.go new file mode 100644 index 0000000000..3a0e32bcce --- /dev/null +++ b/cli/schema_list.go @@ -0,0 +1,89 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" +) + +type schemaListResponse struct { + Data struct { + Collections []struct { + Name string `json:"name"` + ID string `json:"id"` + VersionID string `json:"version_id"` + Fields []struct { + ID string `json:"id"` + Name string `json:"name"` + Kind string `json:"kind"` + Internal bool `json:"internal"` + } `json:"fields"` + } `json:"collections"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` +} + +func MakeSchemaListCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "list", + Short: "List schema types with their respective fields", + RunE: func(cmd *cobra.Command, args []string) (err error) { + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) + if err != nil { + return NewErrFailedToJoinEndpoint(err) + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return NewErrFailedToSendRequest(err) + } + defer res.Body.Close() //nolint:errcheck + + data, err := io.ReadAll(res.Body) + if err != nil { + return NewErrFailedToReadResponseBody(err) + } + + var r schemaListResponse + if err := json.Unmarshal(data, &r); err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + return errors.New("failed to list schemas", errors.NewKV("errors", r.Errors)) + } + + for _, c := range r.Data.Collections { + cmd.Printf("# Schema ID: %s\n", c.ID) + cmd.Printf("# Version ID: %s\n", c.VersionID) + cmd.Printf("type %s {\n", c.Name) + for _, f := range c.Fields { + if !f.Internal { + cmd.Printf("\t%s: %s\n", f.Name, f.Kind) + } + } + cmd.Printf("}\n\n") + } + + return nil + }, + } + return cmd +} diff --git a/cli/schema_migration.go b/cli/schema_migration.go new file mode 100644 index 0000000000..7b37fdcabe --- /dev/null +++ b/cli/schema_migration.go @@ -0,0 +1,25 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeSchemaMigrationCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "migration", + Short: "Interact with the schema migration system of a running DefraDB instance", + Long: `Make set or look for existing schema migrations on a DefraDB node.`, + } + + return cmd +} diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go new file mode 100644 index 0000000000..333c2d9cf4 --- /dev/null +++ b/cli/schema_migration_get.go @@ -0,0 +1,98 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + "os" + + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeSchemaMigrationGetCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "get", + Short: "Gets the schema migrations within DefraDB", + Long: `Gets the schema migrations within the local DefraDB node. + +Example: + defradb client schema migration get' + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err := cobra.NoArgs(cmd, args); err != nil { + return NewErrTooManyArgs(0, len(args)) + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) + if err != nil { + return errors.Wrap("join paths failed", err) + } + + res, err := http.Get(endpoint.String()) + if err != nil { + return errors.Wrap("failed to get schema migrations", err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap("failed to read response body", err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return errors.Wrap("failed to stat stdout", err) + } + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type migrationGetResponse struct { + Data struct { + Configuration []client.LensConfig `json:"configuration"` + } `json:"data"` + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := migrationGetResponse{} + err = json.Unmarshal(response, &r) + log.FeedbackInfo(cmd.Context(), string(response)) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to get schema migrations", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully got schema migrations", + logging.NewKV("Configuration", r.Data.Configuration)) + } + } + + return nil + }, + } + return cmd +} diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go new file mode 100644 index 0000000000..633cbf0115 --- /dev/null +++ b/cli/schema_migration_set.go @@ -0,0 +1,178 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "encoding/json" + "io" + "net/http" + "os" + "strings" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/spf13/cobra" + + httpapi "github.com/sourcenetwork/defradb/api/http" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" +) + +func MakeSchemaMigrationSetCommand(cfg *config.Config) *cobra.Command { + var lensFile string + var cmd = &cobra.Command{ + Use: "set [src] [dst] [cfg]", + Short: "Set a schema migration within DefraDB", + Long: `Set a migration between two schema versions within the local DefraDB node. + +Example: set from an argument string: + defradb client schema migration set bae123 bae456 '{"lenses": [...' + +Example: set from file: + defradb client schema migration set bae123 bae456 -f schema_migration.lens + +Example: set from stdin: + cat schema_migration.lens | defradb client schema migration set bae123 bae456 - + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err := cobra.MinimumNArgs(2)(cmd, args); err != nil { + return NewErrMissingArgs([]string{"src", "dst", "cfg"}) + } + if err := cobra.MaximumNArgs(3)(cmd, args); err != nil { + return NewErrTooManyArgs(3, len(args)) + } + + var lensCfgJson string + var srcSchemaVersionID string + var dstSchemaVersionID string + fi, err := os.Stdin.Stat() + if err != nil { + return err + } + + if lensFile != "" { + buf, err := os.ReadFile(lensFile) + if err != nil { + return errors.Wrap("failed to read schema file", err) + } + lensCfgJson = string(buf) + } else if len(args) == 2 { + // If the lensFile flag has not been provided then the cfg must be provided as an arg, + // and thus len(args) cannot be 2. + return NewErrMissingArg("cfg") + } else if isFileInfoPipe(fi) && args[2] != "-" { + log.FeedbackInfo( + cmd.Context(), + "Run 'defradb client schema migration set -' to read from stdin."+ + " Example: 'cat schema_migration.lens | defradb client schema migration set -'.", + ) + return nil + } else if args[2] == "-" { + stdin, err := readStdin() + if err != nil { + return errors.Wrap("failed to read stdin", err) + } + if len(stdin) == 0 { + return errors.New("no lens cfg in stdin provided") + } else { + lensCfgJson = stdin + } + } else { + lensCfgJson = args[2] + } + + srcSchemaVersionID = args[0] + dstSchemaVersionID = args[1] + + if lensCfgJson == "" { + return NewErrMissingArg("cfg") + } + if srcSchemaVersionID == "" { + return NewErrMissingArg("src") + } + if dstSchemaVersionID == "" { + return NewErrMissingArg("dst") + } + + decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) + decoder.DisallowUnknownFields() + + var lensCfg model.Lens + err = decoder.Decode(&lensCfg) + if err != nil { + return errors.Wrap("invalid lens configuration", err) + } + + migrationCfg := client.LensConfig{ + SourceSchemaVersionID: srcSchemaVersionID, + DestinationSchemaVersionID: dstSchemaVersionID, + Lens: lensCfg, + } + + migrationCfgJson, err := json.Marshal(migrationCfg) + if err != nil { + return errors.Wrap("failed to marshal cfg", err) + } + + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) + if err != nil { + return errors.Wrap("join paths failed", err) + } + + res, err := http.Post(endpoint.String(), "application/json", strings.NewReader(string(migrationCfgJson))) + if err != nil { + return errors.Wrap("failed to post schema
migration", err) + } + + defer func() { + if e := res.Body.Close(); e != nil { + err = NewErrFailedToCloseResponseBody(e, err) + } + }() + + response, err := io.ReadAll(res.Body) + if err != nil { + return errors.Wrap("failed to read response body", err) + } + + stdout, err := os.Stdout.Stat() + if err != nil { + return errors.Wrap("failed to stat stdout", err) + } + if isFileInfoPipe(stdout) { + cmd.Println(string(response)) + } else { + type migrationSetResponse struct { + Errors []struct { + Message string `json:"message"` + } `json:"errors"` + } + r := migrationSetResponse{} + err = json.Unmarshal(response, &r) + if err != nil { + return NewErrFailedToUnmarshalResponse(err) + } + if len(r.Errors) > 0 { + log.FeedbackError(cmd.Context(), "Failed to set schema migration", + logging.NewKV("Errors", r.Errors)) + } else { + log.FeedbackInfo(cmd.Context(), "Successfully set schema migration") + } + } + + return nil + }, + } + cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") + return cmd +} diff --git a/cli/schema_patch.go b/cli/schema_patch.go index 31ac830345..b1e962c51a 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -31,7 +31,7 @@ func MakeSchemaPatchCommand(cfg *config.Config) *cobra.Command { Short: "Patch an existing schema type", Long: `Patch an existing schema. -Uses JSON PATCH formatting as a DDL. +Uses JSON Patch to modify schema types. Example: patch from an argument string: defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' @@ -54,7 +54,7 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s if err = cmd.Usage(); err != nil { return err } - return ErrTooManyArgs + return NewErrTooManyArgs(1, len(args)) } if patchFile != "" { @@ -95,12 +95,16 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s return ErrEmptyFile } - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPatchPath) + endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) if err != nil { return err } - res, err := http.Post(endpoint.String(), "text", strings.NewReader(patch)) + req, err := http.NewRequest(http.MethodPatch, endpoint.String(), strings.NewReader(patch)) + if err != nil { + return NewErrFailedToSendRequest(err) + } + res, err := http.DefaultClient.Do(req) if err != nil { return NewErrFailedToSendRequest(err) } diff --git a/cli/start.go b/cli/start.go index b830a79b9a..5d571be46d 100644 --- a/cli/start.go +++ b/cli/start.go @@ -36,17 +36,16 @@ import ( "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" - netapi "github.com/sourcenetwork/defradb/net/api" - netpb "github.com/sourcenetwork/defradb/net/api/pb" + "github.com/sourcenetwork/defradb/net" + netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" - "github.com/sourcenetwork/defradb/node" ) func MakeStartCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "start", Short: "Start a DefraDB node", - Long: "Start a new instance of DefraDB node.", + Long: "Start a DefraDB node.", // Load the root config if it exists, otherwise create it. 
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { @@ -194,7 +193,7 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { } type defraInstance struct { - node *node.Node + node *net.Node db client.DB server *httpapi.Server } @@ -252,13 +251,13 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { } // init the p2p node - var n *node.Node + var n *net.Node if !cfg.Net.P2PDisabled { log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = node.NewNode( + n, err = net.NewNode( ctx, db, - cfg.NodeConfig(), + net.WithConfig(cfg), ) if err != nil { db.Close(ctx) @@ -315,11 +314,9 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", addr), err) } - netService := netapi.NewService(n.Peer) - go func() { log.FeedbackInfo(ctx, "Started RPC server", logging.NewKV("Address", addr)) - netpb.RegisterServiceServer(server, netService) + netpb.RegisterCollectionServer(server, n.Peer) if err := server.Serve(tcplistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { log.FeedbackFatalE(ctx, "Failed to start RPC server", err) } diff --git a/client/backup.go b/client/backup.go new file mode 100644 index 0000000000..58ccf1f9d2 --- /dev/null +++ b/client/backup.go @@ -0,0 +1,36 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +import ( + "context" +) + +// Backup contains DefraDB's supported backup operations. +type Backup interface { + // BasicImport imports a json dataset. + // filepath must be accessible to the node. + BasicImport(ctx context.Context, filepath string) error + // BasicExport exports the current data or subset of data to file in json format. + BasicExport(ctx context.Context, config *BackupConfig) error +} + +// BackupConfig holds the configuration parameters for database backups. +type BackupConfig struct { + // If a file already exists at this location, it will be truncated and overwriten. + Filepath string `json:"filepath"` + // Only JSON is supported at the moment. + Format string `json:"format"` + // Pretty print JSON. + Pretty bool `json:"pretty"` + // List of collection names to select which one to backup. + Collections []string `json:"collections"` +} diff --git a/client/collection.go b/client/collection.go index f59bf43d6b..9c91dccb7c 100644 --- a/client/collection.go +++ b/client/collection.go @@ -136,6 +136,19 @@ type Collection interface { // GetAllDocKeys returns all the document keys that exist in the collection. GetAllDocKeys(ctx context.Context) (<-chan DocKeysResult, error) + + // CreateIndex creates a new index on the collection. + // `IndexDescription` contains the description of the index to be created. + // `IndexDescription.Name` must start with a letter or an underscore and can + // only contain letters, numbers, and underscores. + // If the name of the index is not provided, it will be generated. + CreateIndex(context.Context, IndexDescription) (IndexDescription, error) + + // DropIndex drops an index from the collection. 
+ DropIndex(ctx context.Context, indexName string) error + + // GetIndexes returns all the indexes that exist on the collection. + GetIndexes(ctx context.Context) ([]IndexDescription, error) } // DocKeysResult wraps the result of an attempt at a DocKey retrieval operation. diff --git a/client/db.go b/client/db.go index 4ec69668cd..ba4dd0b89d 100644 --- a/client/db.go +++ b/client/db.go @@ -19,6 +19,8 @@ import ( "github.com/sourcenetwork/defradb/events" ) +type CollectionName = string + // DB is the primary public programmatic access point to the local DefraDB instance. // // It should be constructed via the [db] package, via the [db.NewDB] function. @@ -83,6 +85,9 @@ type Store interface { // P2P holds the P2P related methods that must be implemented by the database. P2P + // Backup holds the backup related methods that must be implemented by the database. + Backup + // AddSchema takes the provided GQL schema in SDL format, and applies it to the [Store], // creating the necessary collections, request types, etc. // @@ -106,10 +111,29 @@ type Store interface { // [FieldKindStringToEnumMapping]. PatchSchema(context.Context, string) error + // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to + // calling `LensRegistry().SetMigration(ctx, cfg)`. + // + // There may only be one migration per schema version id. If another migration was registered it will be + // overwritten by this migration. + // + // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. + // This is to allow the migration of documents of schema versions unknown to the local node received by the + // P2P system. + // + // Migrations will only run if there is a complete path from the document schema version to the latest local + // schema version. + SetMigration(context.Context, LensConfig) error + + // LensRegistry returns the LensRegistry in use by this database instance. + // + // It exposes several useful thread-safe migration related functions. + LensRegistry() LensRegistry + // GetCollectionByName attempts to retrieve a collection matching the given name. // // If no matching collection is found an error will be returned. - GetCollectionByName(context.Context, string) (Collection, error) + GetCollectionByName(context.Context, CollectionName) (Collection, error) // GetCollectionBySchemaID attempts to retrieve a collection matching the given schema ID. // @@ -125,6 +149,9 @@ type Store interface { // this [Store]. GetAllCollections(context.Context) ([]Collection, error) + // GetAllIndexes returns all the indexes that currently exist within this [Store]. + GetAllIndexes(context.Context) (map[CollectionName][]IndexDescription, error) + // ExecRequest executes the given GQL request against the [Store]. ExecRequest(context.Context, string) *RequestResult } diff --git a/client/descriptions.go b/client/descriptions.go index cd1d7fc53d..0b44f36b83 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -29,6 +29,9 @@ type CollectionDescription struct { // Schema contains the data type information that this Collection uses. Schema SchemaDescription + + // Indexes contains the secondary indexes that this Collection has. + Indexes []IndexDescription } // IDString returns the collection ID as a string. @@ -36,24 +39,12 @@ func (col CollectionDescription) IDString() string { return fmt.Sprint(col.ID) } -// GetField returns the field of the given name.
-func (col CollectionDescription) GetField(name string) (FieldDescription, bool) { - if !col.Schema.IsEmpty() { - for _, field := range col.Schema.Fields { - if field.Name == name { - return field, true - } - } - } - return FieldDescription{}, false -} - // GetFieldByID searches for a field with the given ID. If such a field is found it // will return it and true, if it is not found it will return false. -func (col CollectionDescription) GetFieldByID(id string) (FieldDescription, bool) { +func (col CollectionDescription) GetFieldByID(id FieldID) (FieldDescription, bool) { if !col.Schema.IsEmpty() { for _, field := range col.Schema.Fields { - if field.ID.String() == id { + if field.ID == id { return field, true } } @@ -115,9 +106,56 @@ func (sd SchemaDescription) GetFieldKey(fieldName string) uint32 { return uint32(0) } +// GetField returns the field of the given name. +func (sd SchemaDescription) GetField(name string) (FieldDescription, bool) { + if !sd.IsEmpty() { + for _, field := range sd.Fields { + if field.Name == name { + return field, true + } + } + } + return FieldDescription{}, false +} + // FieldKind describes the type of a field. type FieldKind uint8 +func (f FieldKind) String() string { + switch f { + case FieldKind_DocKey: + return "ID" + case FieldKind_BOOL: + return "Boolean" + case FieldKind_NILLABLE_BOOL_ARRAY: + return "[Boolean]" + case FieldKind_BOOL_ARRAY: + return "[Boolean!]" + case FieldKind_INT: + return "Int" + case FieldKind_NILLABLE_INT_ARRAY: + return "[Int]" + case FieldKind_INT_ARRAY: + return "[Int!]" + case FieldKind_DATETIME: + return "DateTime" + case FieldKind_FLOAT: + return "Float" + case FieldKind_NILLABLE_FLOAT_ARRAY: + return "[Float]" + case FieldKind_FLOAT_ARRAY: + return "[Float!]" + case FieldKind_STRING: + return "String" + case FieldKind_NILLABLE_STRING_ARRAY: + return "[String]" + case FieldKind_STRING_ARRAY: + return "[String!]" + default: + return fmt.Sprint(uint8(f)) + } +} + // Note: These values are serialized and persisted in the database, avoid modifying existing values. const ( FieldKind_None FieldKind = 0 @@ -161,9 +199,9 @@ var FieldKindStringToEnumMapping = map[string]FieldKind{ "Boolean": FieldKind_BOOL, "[Boolean]": FieldKind_NILLABLE_BOOL_ARRAY, "[Boolean!]": FieldKind_BOOL_ARRAY, - "Integer": FieldKind_INT, - "[Integer]": FieldKind_NILLABLE_INT_ARRAY, - "[Integer!]": FieldKind_INT_ARRAY, + "Int": FieldKind_INT, + "[Int]": FieldKind_NILLABLE_INT_ARRAY, + "[Int!]": FieldKind_INT_ARRAY, "DateTime": FieldKind_DATETIME, "Float": FieldKind_FLOAT, "[Float]": FieldKind_NILLABLE_FLOAT_ARRAY, @@ -233,6 +271,11 @@ type FieldDescription struct { RelationType RelationType } +// IsInternal returns true if this field is internally generated. +func (f FieldDescription) IsInternal() bool { + return (f.Name == "_key") || f.RelationType&Relation_Type_INTERNAL_ID != 0 +} + // IsObject returns true if this field is an object type. 
func (f FieldDescription) IsObject() bool { return (f.Kind == FieldKind_FOREIGN_OBJECT) || diff --git a/client/document.go b/client/document.go index 11d432d1fa..5c8fd9441d 100644 --- a/client/document.go +++ b/client/document.go @@ -17,7 +17,9 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" + + "github.com/sourcenetwork/defradb/client/request" + ccid "github.com/sourcenetwork/defradb/core/cid" ) // This is the main implementation starting point for accessing the internal Document API @@ -51,11 +53,17 @@ import ( // @body: A document interface can be implemented by both a TypedDocument and a // UnTypedDocument, which use a schema and schemaless approach respectively. type Document struct { - key DocKey - fields map[string]Field - values map[Field]Value - head cid.Cid - mu sync.RWMutex + key DocKey + // SchemaVersionID holds the id of the schema version that this document is + // currently at. + // + // Migrating the document will update this value to the output version of the + // migration. + SchemaVersionID string + fields map[string]Field + values map[Field]Value + head cid.Cid + mu sync.RWMutex // marks if document has unsaved changes isDirty bool } @@ -100,26 +108,12 @@ func NewDocFromMap(data map[string]any) (*Document, error) { return nil, err } - // if no key was specified, then we assume it doesn't exist and we generate it. + // if no key was specified, then we assume it doesn't exist and we generate, and set it. if !hasKey { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - buf, err := doc.Bytes() - if err != nil { - return nil, err - } - - // And then feed it some data - c, err := pref.Sum(buf) + err = doc.generateAndSetDocKey() if err != nil { return nil, err } - doc.key = NewDocKeyV0(c) } return doc, nil @@ -251,11 +245,6 @@ func (doc *Document) Delete(fields ...string) error { return nil } -// SetAsType Sets the value of a field along with a specific type -// func (doc *Document) SetAsType(t client.CType, field string, value any) error { -// return doc.set(t, field, value) -// } - func (doc *Document) set(t CType, field string, value Value) error { doc.mu.Lock() defer doc.mu.Unlock() @@ -473,6 +462,74 @@ func (doc *Document) toMapWithKey() (map[string]any, error) { return docMap, nil } +// GenerateDocKey generates docKey/docID corresponding to the document. +func (doc *Document) GenerateDocKey() (DocKey, error) { + bytes, err := doc.Bytes() + if err != nil { + return DocKey{}, err + } + + cid, err := ccid.NewSHA256CidV1(bytes) + if err != nil { + return DocKey{}, err + } + + return NewDocKeyV0(cid), nil +} + +// setDocKey sets the `doc.key` (should NOT be public). +func (doc *Document) setDocKey(docID DocKey) { + doc.mu.Lock() + defer doc.mu.Unlock() + + doc.key = docID +} + +// generateAndSetDocKey generates the docKey/docID and then (re)sets `doc.key`. 
+func (doc *Document) generateAndSetDocKey() error { + docKey, err := doc.GenerateDocKey() + if err != nil { + return err + } + + doc.setDocKey(docKey) + return nil +} + +func (doc *Document) remapAliasFields(fieldDescriptions []FieldDescription) (bool, error) { + doc.mu.Lock() + defer doc.mu.Unlock() + + foundAlias := false + for docField, docFieldValue := range doc.fields { + for _, fieldDescription := range fieldDescriptions { + maybeAliasField := docField + request.RelatedObjectID + if fieldDescription.Name == maybeAliasField { + foundAlias = true + doc.fields[maybeAliasField] = docFieldValue + delete(doc.fields, docField) + } + } + } + + return foundAlias, nil +} + +// RemapAliasFieldsAndDockey remaps the alias fields and fixes (overwrites) the dockey. +func (doc *Document) RemapAliasFieldsAndDockey(fieldDescriptions []FieldDescription) error { + foundAlias, err := doc.remapAliasFields(fieldDescriptions) + if err != nil { + return err + } + + if !foundAlias { + return nil + } + + // Update the dockey so dockey isn't based on an aliased name of a field. + return doc.generateAndSetDocKey() +} + // DocumentStatus represent the state of the document in the DAG store. // It can either be `Active“ or `Deleted`. type DocumentStatus uint8 diff --git a/client/document_test.go b/client/document_test.go index e49f238f48..c2e9b406c0 100644 --- a/client/document_test.go +++ b/client/document_test.go @@ -13,9 +13,9 @@ package client import ( "testing" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/assert" + + ccid "github.com/sourcenetwork/defradb/core/cid" ) var ( @@ -28,12 +28,7 @@ var ( } }`) - pref = cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } + pref = ccid.NewDefaultSHA256PrefixV1() ) func TestNewFromJSON(t *testing.T) { diff --git a/client/errors.go b/client/errors.go index 5e55c22710..035ac87235 100644 --- a/client/errors.go +++ b/client/errors.go @@ -17,12 +17,11 @@ import ( ) const ( - errFieldNotExist string = "The given field does not exist" - errSelectOfNonGroupField string = "cannot select a non-group-by field at group-level" - errUnexpectedType string = "unexpected type" - errParsingFailed string = "failed to parse argument" - errUninitializeProperty string = "invalid state, required property is uninitialized" - errMaxTxnRetries string = "reached maximum transaction reties" + errFieldNotExist string = "The given field does not exist" + errUnexpectedType string = "unexpected type" + errParsingFailed string = "failed to parse argument" + errUninitializeProperty string = "invalid state, required property is uninitialized" + errMaxTxnRetries string = "reached maximum transaction reties" ) // Errors returnable from this package. @@ -30,21 +29,20 @@ const ( // This list is incomplete and undefined errors may also be returned. // Errors returned from this package may be tested against these errors with errors.Is. 
var ( - ErrFieldNotExist = errors.New(errFieldNotExist) - ErrSelectOfNonGroupField = errors.New(errSelectOfNonGroupField) - ErrUnexpectedType = errors.New(errUnexpectedType) - ErrParsingFailed = errors.New(errParsingFailed) - ErrUninitializeProperty = errors.New(errUninitializeProperty) - ErrFieldNotObject = errors.New("trying to access field on a non object type") - ErrValueTypeMismatch = errors.New("value does not match indicated type") - ErrIndexNotFound = errors.New("no index found for given ID") - ErrDocumentNotFound = errors.New("no document for the given key exists") - ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") - ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") - ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") - ErrMalformedDocKey = errors.New("malformed DocKey, missing either version or cid") - ErrInvalidDocKeyVersion = errors.New("invalid DocKey version") - ErrMaxTxnRetries = errors.New(errMaxTxnRetries) + ErrFieldNotExist = errors.New(errFieldNotExist) + ErrUnexpectedType = errors.New(errUnexpectedType) + ErrParsingFailed = errors.New(errParsingFailed) + ErrUninitializeProperty = errors.New(errUninitializeProperty) + ErrFieldNotObject = errors.New("trying to access field on a non object type") + ErrValueTypeMismatch = errors.New("value does not match indicated type") + ErrIndexNotFound = errors.New("no index found for given ID") + ErrDocumentNotFound = errors.New("no document for the given key exists") + ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type") + ErrInvalidUpdater = errors.New("the updater of a document is of invalid type") + ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type") + ErrMalformedDocKey = errors.New("malformed DocKey, missing either version or cid") + ErrInvalidDocKeyVersion = errors.New("invalid DocKey version") + ErrMaxTxnRetries = errors.New(errMaxTxnRetries) ) // NewErrFieldNotExist returns an error indicating that the given field does not exist. @@ -58,12 +56,6 @@ func NewErrFieldIndexNotExist(index int) error { return errors.New(errFieldNotExist, errors.NewKV("Index", index)) } -// NewErrSelectOfNonGroupField returns an error indicating that a non-group-by field -// was selected at group-level. -func NewErrSelectOfNonGroupField(name string) error { - return errors.New(errSelectOfNonGroupField, errors.NewKV("Field", name)) -} - // NewErrUnexpectedType returns an error indicating that the given value is of an unexpected type. func NewErrUnexpectedType[TExpected any](property string, actual any) error { var expected TExpected diff --git a/client/index.go b/client/index.go new file mode 100644 index 0000000000..47b52f00c5 --- /dev/null +++ b/client/index.go @@ -0,0 +1,39 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +// IndexDirection is the direction of an index. 
+type IndexDirection string + +const ( + // Ascending is the value to use for ascending fields. + Ascending IndexDirection = "ASC" + // Descending is the value to use for descending fields. + Descending IndexDirection = "DESC" +) + +// IndexedFieldDescription describes how a field is being indexed. +type IndexedFieldDescription struct { + // Name contains the name of the field. + Name string + // Direction contains the direction of the index. + Direction IndexDirection +} + +// IndexDescription describes an index. +type IndexDescription struct { + // Name contains the name of the index. + Name string + // ID is the local identifier of this index. + ID uint32 + // Fields contains the fields that are being indexed. + Fields []IndexedFieldDescription +} diff --git a/client/lens.go b/client/lens.go new file mode 100644 index 0000000000..1cffa19248 --- /dev/null +++ b/client/lens.go @@ -0,0 +1,83 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package client + +import ( + "context" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/datastore" +) + +// LensConfig represents the configuration of a Lens migration in Defra. +type LensConfig struct { + // SourceSchemaVersionID is the ID of the schema version to migrate + // from. + // + // The source and destination versions must be next to each other in the history. + SourceSchemaVersionID string + + // DestinationSchemaVersionID is the ID of the schema version to migrate + // to. + // + // The source and destination versions must be next to each other in the history. + DestinationSchemaVersionID string + + // The configuration of the Lens module. + // + // For now, the wasm module must remain at the location specified as long as the + // migration is active. + model.Lens +} + +// LensRegistry exposes several useful thread-safe migration related functions which may +// be used to manage migrations. +type LensRegistry interface { + // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to + // calling `Store.SetMigration(ctx, cfg)`. + // + // There may only be one migration per schema version id. If another migration was registered it will be + // overwritten by this migration. + // + // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. + // This is to allow the migration of documents of schema versions unknown to the local node received by the + // P2P system. + // + // Migrations will only run if there is a complete path from the document schema version to the latest local + // schema version. + SetMigration(context.Context, datastore.Txn, LensConfig) error + + // ReloadLenses clears any cached migrations, loads their configurations from the database and re-initializes + // them. It is run on database start if the database already existed.
+ ReloadLenses(ctx context.Context, txn datastore.Txn) error + + // MigrateUp returns an enumerable that feeds the given source through the Lens migration for the given + // schema version id if one is found, if there is no matching migration the given source will be returned. + MigrateUp(enumerable.Enumerable[map[string]any], string) (enumerable.Enumerable[map[string]any], error) + + // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the schema + // version that precedes the given schema version id in reverse, if one is found, if there is no matching + // migration the given source will be returned. + // + // This downgrades any documents in the source enumerable if/when enumerated. + MigrateDown(enumerable.Enumerable[map[string]any], string) (enumerable.Enumerable[map[string]any], error) + + // Config returns a slice of the configurations of the currently loaded migrations. + // + // Modifying the slice does not affect the loaded configurations. + Config() []LensConfig + + // HasMigration returns true if there is a migration registered for the given schema version id, otherwise + // will return false. + HasMigration(string) bool +} diff --git a/client/mocks/Collection.go b/client/mocks/Collection.go new file mode 100644 index 0000000000..a675cd1a17 --- /dev/null +++ b/client/mocks/Collection.go @@ -0,0 +1,1271 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "github.com/sourcenetwork/defradb/client" + + datastore "github.com/sourcenetwork/defradb/datastore" + + mock "github.com/stretchr/testify/mock" +) + +// Collection is an autogenerated mock type for the Collection type +type Collection struct { + mock.Mock +} + +type Collection_Expecter struct { + mock *mock.Mock +} + +func (_m *Collection) EXPECT() *Collection_Expecter { + return &Collection_Expecter{mock: &_m.Mock} +} + +// Create provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Create(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create' +type Collection_Create_Call struct { + *mock.Call +} + +// Create is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Create(_a0 interface{}, _a1 interface{}) *Collection_Create_Call { + return &Collection_Create_Call{Call: _e.mock.On("Create", _a0, _a1)} +} + +func (_c *Collection_Create_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Create_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Create_Call) Return(_a0 error) *Collection_Create_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Create_Call { + _c.Call.Return(run) + return _c +} + +// CreateIndex provides a mock function with given fields: _a0, _a1 +func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.IndexDescription + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, client.IndexDescription) (client.IndexDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.IndexDescription) client.IndexDescription); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(client.IndexDescription) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.IndexDescription) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_CreateIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateIndex' +type Collection_CreateIndex_Call struct { + *mock.Call +} + +// CreateIndex is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.IndexDescription +func (_e *Collection_Expecter) CreateIndex(_a0 interface{}, _a1 interface{}) *Collection_CreateIndex_Call { + return &Collection_CreateIndex_Call{Call: _e.mock.On("CreateIndex", _a0, _a1)} +} + +func (_c *Collection_CreateIndex_Call) Run(run func(_a0 context.Context, _a1 client.IndexDescription)) *Collection_CreateIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.IndexDescription)) + }) + return _c +} + +func (_c *Collection_CreateIndex_Call) Return(_a0 client.IndexDescription, _a1 error) *Collection_CreateIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_CreateIndex_Call) RunAndReturn(run func(context.Context, client.IndexDescription) (client.IndexDescription, error)) *Collection_CreateIndex_Call { + _c.Call.Return(run) + return _c +} + +// CreateMany provides a mock function with given fields: _a0, _a1 +func (_m *Collection) CreateMany(_a0 context.Context, _a1 []*client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_CreateMany_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateMany' +type Collection_CreateMany_Call struct { + *mock.Call +} + +// CreateMany is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []*client.Document +func (_e *Collection_Expecter) CreateMany(_a0 interface{}, _a1 interface{}) *Collection_CreateMany_Call { + return &Collection_CreateMany_Call{Call: _e.mock.On("CreateMany", _a0, _a1)} +} + +func (_c *Collection_CreateMany_Call) Run(run func(_a0 context.Context, _a1 []*client.Document)) *Collection_CreateMany_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]*client.Document)) + }) + return _c +} + +func (_c *Collection_CreateMany_Call) Return(_a0 error) *Collection_CreateMany_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_CreateMany_Call) RunAndReturn(run func(context.Context, []*client.Document) error) *Collection_CreateMany_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Delete(_a0 context.Context, _a1 client.DocKey) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); 
ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type Collection_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) Delete(_a0 interface{}, _a1 interface{}) *Collection_Delete_Call { + return &Collection_Delete_Call{Call: _e.mock.On("Delete", _a0, _a1)} +} + +func (_c *Collection_Delete_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_Delete_Call) Return(_a0 bool, _a1 error) *Collection_Delete_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Delete_Call) RunAndReturn(run func(context.Context, client.DocKey) (bool, error)) *Collection_Delete_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWith provides a mock function with given fields: ctx, target +func (_m *Collection) DeleteWith(ctx context.Context, target interface{}) (*client.DeleteResult, error) { + ret := _m.Called(ctx, target) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok { + return rf(ctx, target) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) *client.DeleteResult); ok { + r0 = rf(ctx, target) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, target) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWith' +type Collection_DeleteWith_Call struct { + *mock.Call +} + +// DeleteWith is a helper method to define mock.On call +// - ctx context.Context +// - target interface{} +func (_e *Collection_Expecter) DeleteWith(ctx interface{}, target interface{}) *Collection_DeleteWith_Call { + return &Collection_DeleteWith_Call{Call: _e.mock.On("DeleteWith", ctx, target)} +} + +func (_c *Collection_DeleteWith_Call) Run(run func(ctx context.Context, target interface{})) *Collection_DeleteWith_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{})) + }) + return _c +} + +func (_c *Collection_DeleteWith_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWith_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWith_Call) RunAndReturn(run func(context.Context, interface{}) (*client.DeleteResult, error)) *Collection_DeleteWith_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithFilter provides a mock function with given fields: ctx, filter +func (_m *Collection) DeleteWithFilter(ctx context.Context, filter interface{}) (*client.DeleteResult, error) { + ret := _m.Called(ctx, filter) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok { + return rf(ctx, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) *client.DeleteResult); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
interface{}) error); ok { + r1 = rf(ctx, filter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithFilter' +type Collection_DeleteWithFilter_Call struct { + *mock.Call +} + +// DeleteWithFilter is a helper method to define mock.On call +// - ctx context.Context +// - filter interface{} +func (_e *Collection_Expecter) DeleteWithFilter(ctx interface{}, filter interface{}) *Collection_DeleteWithFilter_Call { + return &Collection_DeleteWithFilter_Call{Call: _e.mock.On("DeleteWithFilter", ctx, filter)} +} + +func (_c *Collection_DeleteWithFilter_Call) Run(run func(ctx context.Context, filter interface{})) *Collection_DeleteWithFilter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{})) + }) + return _c +} + +func (_c *Collection_DeleteWithFilter_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithFilter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithFilter_Call) RunAndReturn(run func(context.Context, interface{}) (*client.DeleteResult, error)) *Collection_DeleteWithFilter_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithKey provides a mock function with given fields: _a0, _a1 +func (_m *Collection) DeleteWithKey(_a0 context.Context, _a1 client.DocKey) (*client.DeleteResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (*client.DeleteResult, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) *client.DeleteResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithKey' +type Collection_DeleteWithKey_Call struct { + *mock.Call +} + +// DeleteWithKey is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) DeleteWithKey(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithKey_Call { + return &Collection_DeleteWithKey_Call{Call: _e.mock.On("DeleteWithKey", _a0, _a1)} +} + +func (_c *Collection_DeleteWithKey_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_DeleteWithKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_DeleteWithKey_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithKey_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithKey_Call) RunAndReturn(run func(context.Context, client.DocKey) (*client.DeleteResult, error)) *Collection_DeleteWithKey_Call { + _c.Call.Return(run) + return _c +} + +// DeleteWithKeys provides a mock function with given fields: _a0, _a1 +func (_m *Collection) DeleteWithKeys(_a0 context.Context, _a1 []client.DocKey) (*client.DeleteResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 *client.DeleteResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey) (*client.DeleteResult, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := 
ret.Get(0).(func(context.Context, []client.DocKey) *client.DeleteResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.DeleteResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_DeleteWithKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithKeys' +type Collection_DeleteWithKeys_Call struct { + *mock.Call +} + +// DeleteWithKeys is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []client.DocKey +func (_e *Collection_Expecter) DeleteWithKeys(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithKeys_Call { + return &Collection_DeleteWithKeys_Call{Call: _e.mock.On("DeleteWithKeys", _a0, _a1)} +} + +func (_c *Collection_DeleteWithKeys_Call) Run(run func(_a0 context.Context, _a1 []client.DocKey)) *Collection_DeleteWithKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]client.DocKey)) + }) + return _c +} + +func (_c *Collection_DeleteWithKeys_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_DeleteWithKeys_Call) RunAndReturn(run func(context.Context, []client.DocKey) (*client.DeleteResult, error)) *Collection_DeleteWithKeys_Call { + _c.Call.Return(run) + return _c +} + +// Description provides a mock function with given fields: +func (_m *Collection) Description() client.CollectionDescription { + ret := _m.Called() + + var r0 client.CollectionDescription + if rf, ok := ret.Get(0).(func() client.CollectionDescription); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.CollectionDescription) + } + + return r0 +} + +// Collection_Description_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Description' +type Collection_Description_Call struct { + *mock.Call +} + +// Description is a helper method to define mock.On call +func (_e *Collection_Expecter) Description() *Collection_Description_Call { + return &Collection_Description_Call{Call: _e.mock.On("Description")} +} + +func (_c *Collection_Description_Call) Run(run func()) *Collection_Description_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Description_Call) Return(_a0 client.CollectionDescription) *Collection_Description_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Description_Call) RunAndReturn(run func() client.CollectionDescription) *Collection_Description_Call { + _c.Call.Return(run) + return _c +} + +// DropIndex provides a mock function with given fields: ctx, indexName +func (_m *Collection) DropIndex(ctx context.Context, indexName string) error { + ret := _m.Called(ctx, indexName) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, indexName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_DropIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DropIndex' +type Collection_DropIndex_Call struct { + *mock.Call +} + +// DropIndex is a helper method to define mock.On call +// - ctx context.Context +// - indexName string +func (_e *Collection_Expecter) DropIndex(ctx interface{}, indexName interface{}) *Collection_DropIndex_Call { + return &Collection_DropIndex_Call{Call: _e.mock.On("DropIndex", ctx, 
indexName)} +} + +func (_c *Collection_DropIndex_Call) Run(run func(ctx context.Context, indexName string)) *Collection_DropIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *Collection_DropIndex_Call) Return(_a0 error) *Collection_DropIndex_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_DropIndex_Call) RunAndReturn(run func(context.Context, string) error) *Collection_DropIndex_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Exists(_a0 context.Context, _a1 client.DocKey) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type Collection_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.DocKey +func (_e *Collection_Expecter) Exists(_a0 interface{}, _a1 interface{}) *Collection_Exists_Call { + return &Collection_Exists_Call{Call: _e.mock.On("Exists", _a0, _a1)} +} + +func (_c *Collection_Exists_Call) Run(run func(_a0 context.Context, _a1 client.DocKey)) *Collection_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey)) + }) + return _c +} + +func (_c *Collection_Exists_Call) Return(_a0 bool, _a1 error) *Collection_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Exists_Call) RunAndReturn(run func(context.Context, client.DocKey) (bool, error)) *Collection_Exists_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key, showDeleted +func (_m *Collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { + ret := _m.Called(ctx, key, showDeleted) + + var r0 *client.Document + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, bool) (*client.Document, error)); ok { + return rf(ctx, key, showDeleted) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, bool) *client.Document); ok { + r0 = rf(ctx, key, showDeleted) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey, bool) error); ok { + r1 = rf(ctx, key, showDeleted) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type Collection_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key client.DocKey +// - showDeleted bool +func (_e *Collection_Expecter) Get(ctx interface{}, key interface{}, showDeleted interface{}) *Collection_Get_Call { + return &Collection_Get_Call{Call: _e.mock.On("Get", ctx, key, showDeleted)} +} + +func (_c *Collection_Get_Call) Run(run func(ctx context.Context, key client.DocKey, showDeleted bool)) 
*Collection_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey), args[2].(bool)) + }) + return _c +} + +func (_c *Collection_Get_Call) Return(_a0 *client.Document, _a1 error) *Collection_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_Get_Call) RunAndReturn(run func(context.Context, client.DocKey, bool) (*client.Document, error)) *Collection_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetAllDocKeys provides a mock function with given fields: ctx +func (_m *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { + ret := _m.Called(ctx) + + var r0 <-chan client.DocKeysResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan client.DocKeysResult, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan client.DocKeysResult); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan client.DocKeysResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_GetAllDocKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllDocKeys' +type Collection_GetAllDocKeys_Call struct { + *mock.Call +} + +// GetAllDocKeys is a helper method to define mock.On call +// - ctx context.Context +func (_e *Collection_Expecter) GetAllDocKeys(ctx interface{}) *Collection_GetAllDocKeys_Call { + return &Collection_GetAllDocKeys_Call{Call: _e.mock.On("GetAllDocKeys", ctx)} +} + +func (_c *Collection_GetAllDocKeys_Call) Run(run func(ctx context.Context)) *Collection_GetAllDocKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Collection_GetAllDocKeys_Call) Return(_a0 <-chan client.DocKeysResult, _a1 error) *Collection_GetAllDocKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_GetAllDocKeys_Call) RunAndReturn(run func(context.Context) (<-chan client.DocKeysResult, error)) *Collection_GetAllDocKeys_Call { + _c.Call.Return(run) + return _c +} + +// GetIndexes provides a mock function with given fields: ctx +func (_m *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + ret := _m.Called(ctx) + + var r0 []client.IndexDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.IndexDescription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.IndexDescription); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.IndexDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_GetIndexes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetIndexes' +type Collection_GetIndexes_Call struct { + *mock.Call +} + +// GetIndexes is a helper method to define mock.On call +// - ctx context.Context +func (_e *Collection_Expecter) GetIndexes(ctx interface{}) *Collection_GetIndexes_Call { + return &Collection_GetIndexes_Call{Call: _e.mock.On("GetIndexes", ctx)} +} + +func (_c *Collection_GetIndexes_Call) Run(run func(ctx context.Context)) *Collection_GetIndexes_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Collection_GetIndexes_Call) 
Return(_a0 []client.IndexDescription, _a1 error) *Collection_GetIndexes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_GetIndexes_Call) RunAndReturn(run func(context.Context) ([]client.IndexDescription, error)) *Collection_GetIndexes_Call { + _c.Call.Return(run) + return _c +} + +// ID provides a mock function with given fields: +func (_m *Collection) ID() uint32 { + ret := _m.Called() + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// Collection_ID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ID' +type Collection_ID_Call struct { + *mock.Call +} + +// ID is a helper method to define mock.On call +func (_e *Collection_Expecter) ID() *Collection_ID_Call { + return &Collection_ID_Call{Call: _e.mock.On("ID")} +} + +func (_c *Collection_ID_Call) Run(run func()) *Collection_ID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_ID_Call) Return(_a0 uint32) *Collection_ID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Call { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with given fields: +func (_m *Collection) Name() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Collection_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type Collection_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *Collection_Expecter) Name() *Collection_Name_Call { + return &Collection_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *Collection_Name_Call) Run(run func()) *Collection_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Name_Call) Return(_a0 string) *Collection_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Name_Call) RunAndReturn(run func() string) *Collection_Name_Call { + _c.Call.Return(run) + return _c +} + +// Save provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Save(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Save_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Save' +type Collection_Save_Call struct { + *mock.Call +} + +// Save is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Save(_a0 interface{}, _a1 interface{}) *Collection_Save_Call { + return &Collection_Save_Call{Call: _e.mock.On("Save", _a0, _a1)} +} + +func (_c *Collection_Save_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Save_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Save_Call) Return(_a0 error) *Collection_Save_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Save_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Save_Call { + _c.Call.Return(run) + return _c +} + +// 
Schema provides a mock function with given fields: +func (_m *Collection) Schema() client.SchemaDescription { + ret := _m.Called() + + var r0 client.SchemaDescription + if rf, ok := ret.Get(0).(func() client.SchemaDescription); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.SchemaDescription) + } + + return r0 +} + +// Collection_Schema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Schema' +type Collection_Schema_Call struct { + *mock.Call +} + +// Schema is a helper method to define mock.On call +func (_e *Collection_Expecter) Schema() *Collection_Schema_Call { + return &Collection_Schema_Call{Call: _e.mock.On("Schema")} +} + +func (_c *Collection_Schema_Call) Run(run func()) *Collection_Schema_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Schema_Call) Return(_a0 client.SchemaDescription) *Collection_Schema_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Schema_Call) RunAndReturn(run func() client.SchemaDescription) *Collection_Schema_Call { + _c.Call.Return(run) + return _c +} + +// SchemaID provides a mock function with given fields: +func (_m *Collection) SchemaID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Collection_SchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SchemaID' +type Collection_SchemaID_Call struct { + *mock.Call +} + +// SchemaID is a helper method to define mock.On call +func (_e *Collection_Expecter) SchemaID() *Collection_SchemaID_Call { + return &Collection_SchemaID_Call{Call: _e.mock.On("SchemaID")} +} + +func (_c *Collection_SchemaID_Call) Run(run func()) *Collection_SchemaID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_SchemaID_Call) Return(_a0 string) *Collection_SchemaID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_SchemaID_Call) RunAndReturn(run func() string) *Collection_SchemaID_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: _a0, _a1 +func (_m *Collection) Update(_a0 context.Context, _a1 *client.Document) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Collection_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type Collection_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *client.Document +func (_e *Collection_Expecter) Update(_a0 interface{}, _a1 interface{}) *Collection_Update_Call { + return &Collection_Update_Call{Call: _e.mock.On("Update", _a0, _a1)} +} + +func (_c *Collection_Update_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.Document)) + }) + return _c +} + +func (_c *Collection_Update_Call) Return(_a0 error) *Collection_Update_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_Update_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWith provides a mock function with 
given fields: ctx, target, updater +func (_m *Collection) UpdateWith(ctx context.Context, target interface{}, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, target, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok { + return rf(ctx, target, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) *client.UpdateResult); ok { + r0 = rf(ctx, target, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, string) error); ok { + r1 = rf(ctx, target, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWith' +type Collection_UpdateWith_Call struct { + *mock.Call +} + +// UpdateWith is a helper method to define mock.On call +// - ctx context.Context +// - target interface{} +// - updater string +func (_e *Collection_Expecter) UpdateWith(ctx interface{}, target interface{}, updater interface{}) *Collection_UpdateWith_Call { + return &Collection_UpdateWith_Call{Call: _e.mock.On("UpdateWith", ctx, target, updater)} +} + +func (_c *Collection_UpdateWith_Call) Run(run func(ctx context.Context, target interface{}, updater string)) *Collection_UpdateWith_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{}), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWith_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWith_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWith_Call) RunAndReturn(run func(context.Context, interface{}, string) (*client.UpdateResult, error)) *Collection_UpdateWith_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithFilter provides a mock function with given fields: ctx, filter, updater +func (_m *Collection) UpdateWithFilter(ctx context.Context, filter interface{}, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, filter, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok { + return rf(ctx, filter, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) *client.UpdateResult); ok { + r0 = rf(ctx, filter, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, string) error); ok { + r1 = rf(ctx, filter, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithFilter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithFilter' +type Collection_UpdateWithFilter_Call struct { + *mock.Call +} + +// UpdateWithFilter is a helper method to define mock.On call +// - ctx context.Context +// - filter interface{} +// - updater string +func (_e *Collection_Expecter) UpdateWithFilter(ctx interface{}, filter interface{}, updater interface{}) *Collection_UpdateWithFilter_Call { + return &Collection_UpdateWithFilter_Call{Call: _e.mock.On("UpdateWithFilter", ctx, filter, updater)} +} + +func (_c *Collection_UpdateWithFilter_Call) Run(run func(ctx context.Context, filter interface{}, updater string)) *Collection_UpdateWithFilter_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(interface{}), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithFilter_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithFilter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithFilter_Call) RunAndReturn(run func(context.Context, interface{}, string) (*client.UpdateResult, error)) *Collection_UpdateWithFilter_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithKey provides a mock function with given fields: ctx, key, updater +func (_m *Collection) UpdateWithKey(ctx context.Context, key client.DocKey, updater string) (*client.UpdateResult, error) { + ret := _m.Called(ctx, key, updater) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, string) (*client.UpdateResult, error)); ok { + return rf(ctx, key, updater) + } + if rf, ok := ret.Get(0).(func(context.Context, client.DocKey, string) *client.UpdateResult); ok { + r0 = rf(ctx, key, updater) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, client.DocKey, string) error); ok { + r1 = rf(ctx, key, updater) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithKey' +type Collection_UpdateWithKey_Call struct { + *mock.Call +} + +// UpdateWithKey is a helper method to define mock.On call +// - ctx context.Context +// - key client.DocKey +// - updater string +func (_e *Collection_Expecter) UpdateWithKey(ctx interface{}, key interface{}, updater interface{}) *Collection_UpdateWithKey_Call { + return &Collection_UpdateWithKey_Call{Call: _e.mock.On("UpdateWithKey", ctx, key, updater)} +} + +func (_c *Collection_UpdateWithKey_Call) Run(run func(ctx context.Context, key client.DocKey, updater string)) *Collection_UpdateWithKey_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.DocKey), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithKey_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithKey_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithKey_Call) RunAndReturn(run func(context.Context, client.DocKey, string) (*client.UpdateResult, error)) *Collection_UpdateWithKey_Call { + _c.Call.Return(run) + return _c +} + +// UpdateWithKeys provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Collection) UpdateWithKeys(_a0 context.Context, _a1 []client.DocKey, _a2 string) (*client.UpdateResult, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *client.UpdateResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey, string) (*client.UpdateResult, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, []client.DocKey, string) *client.UpdateResult); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.UpdateResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []client.DocKey, string) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Collection_UpdateWithKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithKeys' +type Collection_UpdateWithKeys_Call struct { + 
*mock.Call +} + +// UpdateWithKeys is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []client.DocKey +// - _a2 string +func (_e *Collection_Expecter) UpdateWithKeys(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Collection_UpdateWithKeys_Call { + return &Collection_UpdateWithKeys_Call{Call: _e.mock.On("UpdateWithKeys", _a0, _a1, _a2)} +} + +func (_c *Collection_UpdateWithKeys_Call) Run(run func(_a0 context.Context, _a1 []client.DocKey, _a2 string)) *Collection_UpdateWithKeys_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]client.DocKey), args[2].(string)) + }) + return _c +} + +func (_c *Collection_UpdateWithKeys_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithKeys_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Collection_UpdateWithKeys_Call) RunAndReturn(run func(context.Context, []client.DocKey, string) (*client.UpdateResult, error)) *Collection_UpdateWithKeys_Call { + _c.Call.Return(run) + return _c +} + +// WithTxn provides a mock function with given fields: _a0 +func (_m *Collection) WithTxn(_a0 datastore.Txn) client.Collection { + ret := _m.Called(_a0) + + var r0 client.Collection + if rf, ok := ret.Get(0).(func(datastore.Txn) client.Collection); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + return r0 +} + +// Collection_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn' +type Collection_WithTxn_Call struct { + *mock.Call +} + +// WithTxn is a helper method to define mock.On call +// - _a0 datastore.Txn +func (_e *Collection_Expecter) WithTxn(_a0 interface{}) *Collection_WithTxn_Call { + return &Collection_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)} +} + +func (_c *Collection_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *Collection_WithTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(datastore.Txn)) + }) + return _c +} + +func (_c *Collection_WithTxn_Call) Return(_a0 client.Collection) *Collection_WithTxn_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Collection) *Collection_WithTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewCollection creates a new instance of Collection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCollection(t interface { + mock.TestingT + Cleanup(func()) +}) *Collection { + mock := &Collection{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/client/mocks/DB.go b/client/mocks/DB.go new file mode 100644 index 0000000000..82d53291da --- /dev/null +++ b/client/mocks/DB.go @@ -0,0 +1,1308 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
+ +package mocks + +import ( + blockstore "github.com/ipfs/boxo/blockstore" + client "github.com/sourcenetwork/defradb/client" + + context "context" + + datastore "github.com/sourcenetwork/defradb/datastore" + + events "github.com/sourcenetwork/defradb/events" + + mock "github.com/stretchr/testify/mock" +) + +// DB is an autogenerated mock type for the DB type +type DB struct { + mock.Mock +} + +type DB_Expecter struct { + mock *mock.Mock +} + +func (_m *DB) EXPECT() *DB_Expecter { + return &DB_Expecter{mock: &_m.Mock} +} + +// AddP2PCollection provides a mock function with given fields: ctx, collectionID +func (_m *DB) AddP2PCollection(ctx context.Context, collectionID string) error { + ret := _m.Called(ctx, collectionID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collectionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_AddP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddP2PCollection' +type DB_AddP2PCollection_Call struct { + *mock.Call +} + +// AddP2PCollection is a helper method to define mock.On call +// - ctx context.Context +// - collectionID string +func (_e *DB_Expecter) AddP2PCollection(ctx interface{}, collectionID interface{}) *DB_AddP2PCollection_Call { + return &DB_AddP2PCollection_Call{Call: _e.mock.On("AddP2PCollection", ctx, collectionID)} +} + +func (_c *DB_AddP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_AddP2PCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_AddP2PCollection_Call) Return(_a0 error) *DB_AddP2PCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_AddP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_AddP2PCollection_Call { + _c.Call.Return(run) + return _c +} + +// AddSchema provides a mock function with given fields: _a0, _a1 +func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) { + ret := _m.Called(_a0, _a1) + + var r0 []client.CollectionDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.CollectionDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []client.CollectionDescription); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.CollectionDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_AddSchema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddSchema' +type DB_AddSchema_Call struct { + *mock.Call +} + +// AddSchema is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) AddSchema(_a0 interface{}, _a1 interface{}) *DB_AddSchema_Call { + return &DB_AddSchema_Call{Call: _e.mock.On("AddSchema", _a0, _a1)} +} + +func (_c *DB_AddSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_AddSchema_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_AddSchema_Call) Return(_a0 []client.CollectionDescription, _a1 error) *DB_AddSchema_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_AddSchema_Call) RunAndReturn(run func(context.Context, string) 
([]client.CollectionDescription, error)) *DB_AddSchema_Call { + _c.Call.Return(run) + return _c +} + +// BasicExport provides a mock function with given fields: ctx, config +func (_m *DB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + ret := _m.Called(ctx, config) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *client.BackupConfig) error); ok { + r0 = rf(ctx, config) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_BasicExport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BasicExport' +type DB_BasicExport_Call struct { + *mock.Call +} + +// BasicExport is a helper method to define mock.On call +// - ctx context.Context +// - config *client.BackupConfig +func (_e *DB_Expecter) BasicExport(ctx interface{}, config interface{}) *DB_BasicExport_Call { + return &DB_BasicExport_Call{Call: _e.mock.On("BasicExport", ctx, config)} +} + +func (_c *DB_BasicExport_Call) Run(run func(ctx context.Context, config *client.BackupConfig)) *DB_BasicExport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*client.BackupConfig)) + }) + return _c +} + +func (_c *DB_BasicExport_Call) Return(_a0 error) *DB_BasicExport_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_BasicExport_Call) RunAndReturn(run func(context.Context, *client.BackupConfig) error) *DB_BasicExport_Call { + _c.Call.Return(run) + return _c +} + +// BasicImport provides a mock function with given fields: ctx, filepath +func (_m *DB) BasicImport(ctx context.Context, filepath string) error { + ret := _m.Called(ctx, filepath) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, filepath) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_BasicImport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BasicImport' +type DB_BasicImport_Call struct { + *mock.Call +} + +// BasicImport is a helper method to define mock.On call +// - ctx context.Context +// - filepath string +func (_e *DB_Expecter) BasicImport(ctx interface{}, filepath interface{}) *DB_BasicImport_Call { + return &DB_BasicImport_Call{Call: _e.mock.On("BasicImport", ctx, filepath)} +} + +func (_c *DB_BasicImport_Call) Run(run func(ctx context.Context, filepath string)) *DB_BasicImport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_BasicImport_Call) Return(_a0 error) *DB_BasicImport_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_BasicImport_Call) RunAndReturn(run func(context.Context, string) error) *DB_BasicImport_Call { + _c.Call.Return(run) + return _c +} + +// Blockstore provides a mock function with given fields: +func (_m *DB) Blockstore() blockstore.Blockstore { + ret := _m.Called() + + var r0 blockstore.Blockstore + if rf, ok := ret.Get(0).(func() blockstore.Blockstore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blockstore.Blockstore) + } + } + + return r0 +} + +// DB_Blockstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Blockstore' +type DB_Blockstore_Call struct { + *mock.Call +} + +// Blockstore is a helper method to define mock.On call +func (_e *DB_Expecter) Blockstore() *DB_Blockstore_Call { + return &DB_Blockstore_Call{Call: _e.mock.On("Blockstore")} +} + +func (_c *DB_Blockstore_Call) Run(run func()) *DB_Blockstore_Call { + _c.Call.Run(func(args 
mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Blockstore_Call) Return(_a0 blockstore.Blockstore) *DB_Blockstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Blockstore_Call) RunAndReturn(run func() blockstore.Blockstore) *DB_Blockstore_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: _a0 +func (_m *DB) Close(_a0 context.Context) { + _m.Called(_a0) +} + +// DB_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type DB_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) Close(_a0 interface{}) *DB_Close_Call { + return &DB_Close_Call{Call: _e.mock.On("Close", _a0)} +} + +func (_c *DB_Close_Call) Run(run func(_a0 context.Context)) *DB_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_Close_Call) Return() *DB_Close_Call { + _c.Call.Return() + return _c +} + +func (_c *DB_Close_Call) RunAndReturn(run func(context.Context)) *DB_Close_Call { + _c.Call.Return(run) + return _c +} + +// DeleteReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_DeleteReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicator' +type DB_DeleteReplicator_Call struct { + *mock.Call +} + +// DeleteReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) DeleteReplicator(ctx interface{}, rep interface{}) *DB_DeleteReplicator_Call { + return &DB_DeleteReplicator_Call{Call: _e.mock.On("DeleteReplicator", ctx, rep)} +} + +func (_c *DB_DeleteReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_DeleteReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_DeleteReplicator_Call) Return(_a0 error) *DB_DeleteReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_DeleteReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_DeleteReplicator_Call { + _c.Call.Return(run) + return _c +} + +// Events provides a mock function with given fields: +func (_m *DB) Events() events.Events { + ret := _m.Called() + + var r0 events.Events + if rf, ok := ret.Get(0).(func() events.Events); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(events.Events) + } + + return r0 +} + +// DB_Events_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Events' +type DB_Events_Call struct { + *mock.Call +} + +// Events is a helper method to define mock.On call +func (_e *DB_Expecter) Events() *DB_Events_Call { + return &DB_Events_Call{Call: _e.mock.On("Events")} +} + +func (_c *DB_Events_Call) Run(run func()) *DB_Events_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Events_Call) Return(_a0 events.Events) *DB_Events_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call { + _c.Call.Return(run) + return _c +} + +// 
ExecRequest provides a mock function with given fields: _a0, _a1 +func (_m *DB) ExecRequest(_a0 context.Context, _a1 string) *client.RequestResult { + ret := _m.Called(_a0, _a1) + + var r0 *client.RequestResult + if rf, ok := ret.Get(0).(func(context.Context, string) *client.RequestResult); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.RequestResult) + } + } + + return r0 +} + +// DB_ExecRequest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecRequest' +type DB_ExecRequest_Call struct { + *mock.Call +} + +// ExecRequest is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) ExecRequest(_a0 interface{}, _a1 interface{}) *DB_ExecRequest_Call { + return &DB_ExecRequest_Call{Call: _e.mock.On("ExecRequest", _a0, _a1)} +} + +func (_c *DB_ExecRequest_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_ExecRequest_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_ExecRequest_Call) Return(_a0 *client.RequestResult) *DB_ExecRequest_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *client.RequestResult) *DB_ExecRequest_Call { + _c.Call.Return(run) + return _c +} + +// GetAllCollections provides a mock function with given fields: _a0 +func (_m *DB) GetAllCollections(_a0 context.Context) ([]client.Collection, error) { + ret := _m.Called(_a0) + + var r0 []client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.Collection, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.Collection); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllCollections' +type DB_GetAllCollections_Call struct { + *mock.Call +} + +// GetAllCollections is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) GetAllCollections(_a0 interface{}) *DB_GetAllCollections_Call { + return &DB_GetAllCollections_Call{Call: _e.mock.On("GetAllCollections", _a0)} +} + +func (_c *DB_GetAllCollections_Call) Run(run func(_a0 context.Context)) *DB_GetAllCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllCollections_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetAllCollections_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllCollections_Call) RunAndReturn(run func(context.Context) ([]client.Collection, error)) *DB_GetAllCollections_Call { + _c.Call.Return(run) + return _c +} + +// GetAllIndexes provides a mock function with given fields: _a0 +func (_m *DB) GetAllIndexes(_a0 context.Context) (map[string][]client.IndexDescription, error) { + ret := _m.Called(_a0) + + var r0 map[string][]client.IndexDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (map[string][]client.IndexDescription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) map[string][]client.IndexDescription); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(map[string][]client.IndexDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllIndexes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllIndexes' +type DB_GetAllIndexes_Call struct { + *mock.Call +} + +// GetAllIndexes is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) GetAllIndexes(_a0 interface{}) *DB_GetAllIndexes_Call { + return &DB_GetAllIndexes_Call{Call: _e.mock.On("GetAllIndexes", _a0)} +} + +func (_c *DB_GetAllIndexes_Call) Run(run func(_a0 context.Context)) *DB_GetAllIndexes_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllIndexes_Call) Return(_a0 map[string][]client.IndexDescription, _a1 error) *DB_GetAllIndexes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[string][]client.IndexDescription, error)) *DB_GetAllIndexes_Call { + _c.Call.Return(run) + return _c +} + +// GetAllP2PCollections provides a mock function with given fields: ctx +func (_m *DB) GetAllP2PCollections(ctx context.Context) ([]string, error) { + ret := _m.Called(ctx) + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []string); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllP2PCollections_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllP2PCollections' +type DB_GetAllP2PCollections_Call struct { + *mock.Call +} + +// GetAllP2PCollections is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllP2PCollections(ctx interface{}) *DB_GetAllP2PCollections_Call { + return &DB_GetAllP2PCollections_Call{Call: _e.mock.On("GetAllP2PCollections", ctx)} +} + +func (_c *DB_GetAllP2PCollections_Call) Run(run func(ctx context.Context)) *DB_GetAllP2PCollections_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) Return(_a0 []string, _a1 error) *DB_GetAllP2PCollections_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllP2PCollections_Call) RunAndReturn(run func(context.Context) ([]string, error)) *DB_GetAllP2PCollections_Call { + _c.Call.Return(run) + return _c +} + +// GetAllReplicators provides a mock function with given fields: ctx +func (_m *DB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + ret := _m.Called(ctx) + + var r0 []client.Replicator + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]client.Replicator, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []client.Replicator); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.Replicator) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetAllReplicators_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllReplicators' +type 
DB_GetAllReplicators_Call struct { + *mock.Call +} + +// GetAllReplicators is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) GetAllReplicators(ctx interface{}) *DB_GetAllReplicators_Call { + return &DB_GetAllReplicators_Call{Call: _e.mock.On("GetAllReplicators", ctx)} +} + +func (_c *DB_GetAllReplicators_Call) Run(run func(ctx context.Context)) *DB_GetAllReplicators_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_GetAllReplicators_Call) Return(_a0 []client.Replicator, _a1 error) *DB_GetAllReplicators_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetAllReplicators_Call) RunAndReturn(run func(context.Context) ([]client.Replicator, error)) *DB_GetAllReplicators_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionByName provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByName' +type DB_GetCollectionByName_Call struct { + *mock.Call +} + +// GetCollectionByName is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionByName(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByName_Call { + return &DB_GetCollectionByName_Call{Call: _e.mock.On("GetCollectionByName", _a0, _a1)} +} + +func (_c *DB_GetCollectionByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionByName_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByName_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionBySchemaID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionBySchemaID(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionBySchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionBySchemaID' +type DB_GetCollectionBySchemaID_Call struct { + *mock.Call +} + +// 
GetCollectionBySchemaID is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionBySchemaID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionBySchemaID_Call { + return &DB_GetCollectionBySchemaID_Call{Call: _e.mock.On("GetCollectionBySchemaID", _a0, _a1)} +} + +func (_c *DB_GetCollectionBySchemaID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionBySchemaID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionBySchemaID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionBySchemaID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionBySchemaID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionBySchemaID_Call { + _c.Call.Return(run) + return _c +} + +// GetCollectionByVersionID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionByVersionID(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetCollectionByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByVersionID' +type DB_GetCollectionByVersionID_Call struct { + *mock.Call +} + +// GetCollectionByVersionID is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByVersionID_Call { + return &DB_GetCollectionByVersionID_Call{Call: _e.mock.On("GetCollectionByVersionID", _a0, _a1)} +} + +func (_c *DB_GetCollectionByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByVersionID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetCollectionByVersionID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByVersionID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetCollectionByVersionID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByVersionID_Call { + _c.Call.Return(run) + return _c +} + +// LensRegistry provides a mock function with given fields: +func (_m *DB) LensRegistry() client.LensRegistry { + ret := _m.Called() + + var r0 client.LensRegistry + if rf, ok := ret.Get(0).(func() client.LensRegistry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.LensRegistry) + } + } + + return r0 +} + +// DB_LensRegistry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LensRegistry' +type DB_LensRegistry_Call struct { + *mock.Call +} + +// LensRegistry is a helper method to define mock.On call +func (_e *DB_Expecter) LensRegistry() *DB_LensRegistry_Call { + return &DB_LensRegistry_Call{Call: _e.mock.On("LensRegistry")} +} + +func (_c *DB_LensRegistry_Call) Run(run func()) 
*DB_LensRegistry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_LensRegistry_Call) Return(_a0 client.LensRegistry) *DB_LensRegistry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_LensRegistry_Call) RunAndReturn(run func() client.LensRegistry) *DB_LensRegistry_Call { + _c.Call.Return(run) + return _c +} + +// MaxTxnRetries provides a mock function with given fields: +func (_m *DB) MaxTxnRetries() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// DB_MaxTxnRetries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MaxTxnRetries' +type DB_MaxTxnRetries_Call struct { + *mock.Call +} + +// MaxTxnRetries is a helper method to define mock.On call +func (_e *DB_Expecter) MaxTxnRetries() *DB_MaxTxnRetries_Call { + return &DB_MaxTxnRetries_Call{Call: _e.mock.On("MaxTxnRetries")} +} + +func (_c *DB_MaxTxnRetries_Call) Run(run func()) *DB_MaxTxnRetries_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_MaxTxnRetries_Call) Return(_a0 int) *DB_MaxTxnRetries_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_MaxTxnRetries_Call) RunAndReturn(run func() int) *DB_MaxTxnRetries_Call { + _c.Call.Return(run) + return _c +} + +// NewConcurrentTxn provides a mock function with given fields: _a0, _a1 +func (_m *DB) NewConcurrentTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { + ret := _m.Called(_a0, _a1) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_NewConcurrentTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewConcurrentTxn' +type DB_NewConcurrentTxn_Call struct { + *mock.Call +} + +// NewConcurrentTxn is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 bool +func (_e *DB_Expecter) NewConcurrentTxn(_a0 interface{}, _a1 interface{}) *DB_NewConcurrentTxn_Call { + return &DB_NewConcurrentTxn_Call{Call: _e.mock.On("NewConcurrentTxn", _a0, _a1)} +} + +func (_c *DB_NewConcurrentTxn_Call) Run(run func(_a0 context.Context, _a1 bool)) *DB_NewConcurrentTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *DB_NewConcurrentTxn_Call) Return(_a0 datastore.Txn, _a1 error) *DB_NewConcurrentTxn_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_NewConcurrentTxn_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *DB_NewConcurrentTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewTxn provides a mock function with given fields: _a0, _a1 +func (_m *DB) NewTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) { + ret := _m.Called(_a0, _a1) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_NewTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTxn' +type DB_NewTxn_Call struct { + *mock.Call +} + +// NewTxn is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 bool +func (_e *DB_Expecter) NewTxn(_a0 interface{}, _a1 interface{}) *DB_NewTxn_Call { + return &DB_NewTxn_Call{Call: _e.mock.On("NewTxn", _a0, _a1)} +} + +func (_c *DB_NewTxn_Call) Run(run func(_a0 context.Context, _a1 bool)) *DB_NewTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *DB_NewTxn_Call) Return(_a0 datastore.Txn, _a1 error) *DB_NewTxn_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *DB_NewTxn_Call { + _c.Call.Return(run) + return _c +} + +// PatchSchema provides a mock function with given fields: _a0, _a1 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_PatchSchema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchSchema' +type DB_PatchSchema_Call struct { + *mock.Call +} + +// PatchSchema is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1)} +} + +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_PatchSchema_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string) error) *DB_PatchSchema_Call { + _c.Call.Return(run) + return _c +} + +// PrintDump provides a mock function with given fields: ctx +func (_m *DB) PrintDump(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_PrintDump_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintDump' +type DB_PrintDump_Call struct { + *mock.Call +} + +// PrintDump is a helper method to define mock.On call +// - ctx context.Context +func (_e *DB_Expecter) PrintDump(ctx interface{}) *DB_PrintDump_Call { + return &DB_PrintDump_Call{Call: _e.mock.On("PrintDump", ctx)} +} + +func (_c *DB_PrintDump_Call) Run(run func(ctx context.Context)) *DB_PrintDump_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DB_PrintDump_Call) Return(_a0 error) *DB_PrintDump_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_PrintDump_Call { + _c.Call.Return(run) + return _c +} + +// RemoveP2PCollection provides a mock function with given fields: ctx, collectionID +func (_m *DB) 
RemoveP2PCollection(ctx context.Context, collectionID string) error { + ret := _m.Called(ctx, collectionID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, collectionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_RemoveP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveP2PCollection' +type DB_RemoveP2PCollection_Call struct { + *mock.Call +} + +// RemoveP2PCollection is a helper method to define mock.On call +// - ctx context.Context +// - collectionID string +func (_e *DB_Expecter) RemoveP2PCollection(ctx interface{}, collectionID interface{}) *DB_RemoveP2PCollection_Call { + return &DB_RemoveP2PCollection_Call{Call: _e.mock.On("RemoveP2PCollection", ctx, collectionID)} +} + +func (_c *DB_RemoveP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_RemoveP2PCollection_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_RemoveP2PCollection_Call) Return(_a0 error) *DB_RemoveP2PCollection_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_RemoveP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_RemoveP2PCollection_Call { + _c.Call.Return(run) + return _c +} + +// Root provides a mock function with given fields: +func (_m *DB) Root() datastore.RootStore { + ret := _m.Called() + + var r0 datastore.RootStore + if rf, ok := ret.Get(0).(func() datastore.RootStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.RootStore) + } + } + + return r0 +} + +// DB_Root_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Root' +type DB_Root_Call struct { + *mock.Call +} + +// Root is a helper method to define mock.On call +func (_e *DB_Expecter) Root() *DB_Root_Call { + return &DB_Root_Call{Call: _e.mock.On("Root")} +} + +func (_c *DB_Root_Call) Run(run func()) *DB_Root_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *DB_Root_Call) Return(_a0 datastore.RootStore) *DB_Root_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Call { + _c.Call.Return(run) + return _c +} + +// SetMigration provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetMigration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetMigration' +type DB_SetMigration_Call struct { + *mock.Call +} + +// SetMigration is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.LensConfig +func (_e *DB_Expecter) SetMigration(_a0 interface{}, _a1 interface{}) *DB_SetMigration_Call { + return &DB_SetMigration_Call{Call: _e.mock.On("SetMigration", _a0, _a1)} +} + +func (_c *DB_SetMigration_Call) Run(run func(_a0 context.Context, _a1 client.LensConfig)) *DB_SetMigration_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.LensConfig)) + }) + return _c +} + +func (_c *DB_SetMigration_Call) Return(_a0 error) *DB_SetMigration_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c 
*DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.LensConfig) error) *DB_SetMigration_Call { + _c.Call.Return(run) + return _c +} + +// SetReplicator provides a mock function with given fields: ctx, rep +func (_m *DB) SetReplicator(ctx context.Context, rep client.Replicator) error { + ret := _m.Called(ctx, rep) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { + r0 = rf(ctx, rep) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetReplicator' +type DB_SetReplicator_Call struct { + *mock.Call +} + +// SetReplicator is a helper method to define mock.On call +// - ctx context.Context +// - rep client.Replicator +func (_e *DB_Expecter) SetReplicator(ctx interface{}, rep interface{}) *DB_SetReplicator_Call { + return &DB_SetReplicator_Call{Call: _e.mock.On("SetReplicator", ctx, rep)} +} + +func (_c *DB_SetReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_SetReplicator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(client.Replicator)) + }) + return _c +} + +func (_c *DB_SetReplicator_Call) Return(_a0 error) *DB_SetReplicator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_SetReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_SetReplicator_Call { + _c.Call.Return(run) + return _c +} + +// WithTxn provides a mock function with given fields: _a0 +func (_m *DB) WithTxn(_a0 datastore.Txn) client.Store { + ret := _m.Called(_a0) + + var r0 client.Store + if rf, ok := ret.Get(0).(func(datastore.Txn) client.Store); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Store) + } + } + + return r0 +} + +// DB_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn' +type DB_WithTxn_Call struct { + *mock.Call +} + +// WithTxn is a helper method to define mock.On call +// - _a0 datastore.Txn +func (_e *DB_Expecter) WithTxn(_a0 interface{}) *DB_WithTxn_Call { + return &DB_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)} +} + +func (_c *DB_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *DB_WithTxn_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(datastore.Txn)) + }) + return _c +} + +func (_c *DB_WithTxn_Call) Return(_a0 client.Store) *DB_WithTxn_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Store) *DB_WithTxn_Call { + _c.Call.Return(run) + return _c +} + +// NewDB creates a new instance of DB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDB(t interface { + mock.TestingT + Cleanup(func()) +}) *DB { + mock := &DB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/client/request/consts.go b/client/request/consts.go index 04ade35bc5..7287a49ac3 100644 --- a/client/request/consts.go +++ b/client/request/consts.go @@ -15,6 +15,10 @@ const ( // https://spec.graphql.org/October2021/#sec-Type-Name-Introspection TypeNameFieldName = "__typename" + // This is appended to the related object name to give us the field name + // that corresponds to the related object's join relation id, i.e. `Author_id`. 
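+	// Group-by validation, for example, uses it to allow selecting `Author_id` when grouping by the `Author` relation.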
+ RelatedObjectID = "_id" + Cid = "cid" Data = "data" DocKey = "dockey" diff --git a/client/request/errors.go b/client/request/errors.go new file mode 100644 index 0000000000..e3c6b143f0 --- /dev/null +++ b/client/request/errors.go @@ -0,0 +1,33 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package request + +import ( + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errSelectOfNonGroupField string = "cannot select a non-group-by field at group-level" +) + +// Errors returnable from this package. +// +// This list is incomplete and undefined errors may also be returned. +// Errors returned from this package may be tested against these errors with errors.Is. +var ( + ErrSelectOfNonGroupField = errors.New(errSelectOfNonGroupField) +) + +// NewErrSelectOfNonGroupField returns an error indicating that a non-group-by field +// was selected at group-level. +func NewErrSelectOfNonGroupField(name string) error { + return errors.New(errSelectOfNonGroupField, errors.NewKV("Field", name)) +} diff --git a/client/request/explain.go b/client/request/explain.go index ee8cb2b388..36d5df4cbe 100644 --- a/client/request/explain.go +++ b/client/request/explain.go @@ -17,4 +17,5 @@ type ExplainType string const ( SimpleExplain ExplainType = "simple" ExecuteExplain ExplainType = "execute" + DebugExplain ExplainType = "debug" ) diff --git a/client/request/select.go b/client/request/select.go index 0d09cad8dc..fb842228aa 100644 --- a/client/request/select.go +++ b/client/request/select.go @@ -12,8 +12,6 @@ package request import ( "github.com/sourcenetwork/immutable" - - "github.com/sourcenetwork/defradb/client" ) // SelectionType is the type of selection. @@ -89,14 +87,18 @@ func (s *Select) validateGroupBy() []error { } var fieldExistsInGroupBy bool + var isAliasFieldInGroupBy bool for _, groupByField := range s.GroupBy.Value().Fields { if typedChildSelection.Name == groupByField { fieldExistsInGroupBy = true break + } else if typedChildSelection.Name == groupByField+RelatedObjectID { + isAliasFieldInGroupBy = true + break } } - if !fieldExistsInGroupBy { - result = append(result, client.NewErrSelectOfNonGroupField(typedChildSelection.Name)) + if !fieldExistsInGroupBy && !isAliasFieldInGroupBy { + result = append(result, NewErrSelectOfNonGroupField(typedChildSelection.Name)) } default: // Do nothing diff --git a/config/config.go b/config/config.go index 524a9fe94f..e659dc0cbc 100644 --- a/config/config.go +++ b/config/config.go @@ -61,7 +61,6 @@ import ( badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/node" ) var log = logging.MustNewLogger("config") @@ -424,29 +423,6 @@ func (netcfg *NetConfig) RPCMaxConnectionIdleDuration() (time.Duration, error) { return d, nil } -// NodeConfig provides the Node-specific configuration, from the top-level Net config. 
-func (cfg *Config) NodeConfig() node.NodeOpt { - return func(opt *node.Options) error { - var err error - err = node.ListenP2PAddrStrings(cfg.Net.P2PAddress)(opt) - if err != nil { - return err - } - err = node.ListenTCPAddrString(cfg.Net.TCPAddress)(opt) - if err != nil { - return err - } - opt.EnableRelay = cfg.Net.RelayEnabled - opt.EnablePubSub = cfg.Net.PubSubEnabled - opt.DataPath = cfg.Datastore.Badger.Path - opt.ConnManager, err = node.NewConnManager(100, 400, time.Second*20) - if err != nil { - return err - } - return nil - } -} - // LogConfig configures output and logger. type LoggingConfig struct { Level string diff --git a/config/config_test.go b/config/config_test.go index 2ed3a3dec3..b7ff295efa 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -17,10 +17,7 @@ import ( "testing" "time" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/node" ) var envVarsDifferent = map[string]string{ @@ -224,47 +221,6 @@ func TestInvalidEnvVars(t *testing.T) { assert.ErrorIs(t, err, ErrLoadingConfig) } -func TestNodeConfig(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" - cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" - cfg.Net.RPCTimeout = "100s" - cfg.Net.RPCMaxConnectionIdle = "111s" - cfg.Net.RelayEnabled = true - cfg.Net.PubSubEnabled = true - cfg.Datastore.Badger.Path = "/tmp/defra_cli/badger" - - err := cfg.validate() - assert.NoError(t, err) - - nodeConfig := cfg.NodeConfig() - options, errOptionsMerge := node.NewMergedOptions(nodeConfig) - - // confirming it provides the same config as a manually constructed node.Options - p2pAddr, errP2P := ma.NewMultiaddr(cfg.Net.P2PAddress) - tcpAddr, errTCP := ma.NewMultiaddr(cfg.Net.TCPAddress) - connManager, errConnManager := node.NewConnManager(100, 400, time.Second*20) - expectedOptions := node.Options{ - ListenAddrs: []ma.Multiaddr{p2pAddr}, - TCPAddr: tcpAddr, - DataPath: "/tmp/defra_cli/badger", - EnablePubSub: true, - EnableRelay: true, - ConnManager: connManager, - } - assert.NoError(t, errOptionsMerge) - assert.NoError(t, errP2P) - assert.NoError(t, errTCP) - assert.NoError(t, errConnManager) - for k, v := range options.ListenAddrs { - assert.Equal(t, expectedOptions.ListenAddrs[k], v) - } - assert.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) - assert.Equal(t, expectedOptions.DataPath, options.DataPath) - assert.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) - assert.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) -} - func TestCreateAndLoadCustomConfig(t *testing.T) { testdir := t.TempDir() diff --git a/connor/connor.go b/connor/connor.go index 9f56041c6c..4b174bc45c 100644 --- a/connor/connor.go +++ b/connor/connor.go @@ -40,6 +40,8 @@ func matchWith(op string, conditions, data any) (bool, error) { return like(conditions, data) case "_nlike": return nlike(conditions, data) + case "_not": + return not(conditions, data) default: return false, NewErrUnknownOperator(op) } diff --git a/connor/not.go b/connor/not.go new file mode 100644 index 0000000000..96fcd87ff8 --- /dev/null +++ b/connor/not.go @@ -0,0 +1,11 @@ +package connor + +// not is an operator which performs object equality test +// and returns the inverse of the result. 
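+// For example, not("a", "a") yields false, while not("a", "b") yields true.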
+func not(condition, data any) (bool, error) { + m, err := eq(condition, data) + if err != nil { + return false, err + } + return !m, nil +} diff --git a/connor/not_test.go b/connor/not_test.go new file mode 100644 index 0000000000..1a1dd785dd --- /dev/null +++ b/connor/not_test.go @@ -0,0 +1,50 @@ +package connor + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNot_WithNotAndNotNot_NoError(t *testing.T) { + const testString = "Source is the glue of web3" + + // not equal + result, err := not(testString, testString) + require.NoError(t, err) + require.False(t, result) + + // not not equal + result, err = not("Source is the glue", testString) + require.NoError(t, err) + require.True(t, result) +} + +func TestNot_WithEmptyCondition_ReturnError(t *testing.T) { + const testString = "Source is the glue of web3" + + _, err := not(map[FilterKey]any{&operator{"_some"}: "test"}, testString) + require.ErrorIs(t, err, ErrUnknownOperator) +} + +type operator struct { + // The filter operation string that this `operator`` represents. + // + // E.g. "_eq", or "_and". + Operation string +} + +func (k *operator) GetProp(data any) any { + return data +} + +func (k *operator) GetOperatorOrDefault(defaultOp string) string { + return k.Operation +} + +func (k *operator) Equal(other FilterKey) bool { + if otherKey, isOk := other.(*operator); isOk && *k == *otherKey { + return true + } + return false +} diff --git a/core/cid.go b/core/cid/cid.go similarity index 85% rename from core/cid.go rename to core/cid/cid.go index d9c8bbeea2..14367f4ae9 100644 --- a/core/cid.go +++ b/core/cid/cid.go @@ -8,22 +8,24 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package core +package cid import ( "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" ) -// NewSHA256CidV1 returns a new CIDv1 with the SHA256 multihash. -func NewSHA256CidV1(data []byte) (cid.Cid, error) { - pref := cid.Prefix{ +func NewDefaultSHA256PrefixV1() cid.Prefix { + return cid.Prefix{ Version: 1, Codec: cid.Raw, MhType: mh.SHA2_256, MhLength: -1, // default length } +} +// NewSHA256CidV1 returns a new CIDv1 with the SHA256 multihash. +func NewSHA256CidV1(data []byte) (cid.Cid, error) { // And then feed it some data - return pref.Sum(data) + return NewDefaultSHA256PrefixV1().Sum(data) } diff --git a/core/crdt/composite.go b/core/crdt/composite.go index e7ce96e867..ab6cbe95f5 100644 --- a/core/crdt/composite.go +++ b/core/crdt/composite.go @@ -154,13 +154,27 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta, id string) er return c.deleteWithPrefix(ctx, c.key.WithValueFlag().WithFieldId("")) } - // ensure object marker exists - exists, err := c.store.Has(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) + // We cannot rely on the dagDelta.Status here as it may have been deleted locally, this is not + // reflected in `dagDelta.Status` if sourced via P2P. Updates synced via P2P should not undelete + // the local reperesentation of the document. 
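+	// When the local object marker flags the document as deleted, the schema version entry is written under the deleted flag.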
+ versionKey := c.key.WithValueFlag().WithFieldId(core.DATASTORE_DOC_VERSION_FIELD_ID) + objectMarker, err := c.store.Get(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) + hasObjectMarker := !errors.Is(err, ds.ErrNotFound) + if err != nil && hasObjectMarker { + return err + } + + if bytes.Equal(objectMarker, []byte{base.DeletedObjectMarker}) { + versionKey = versionKey.WithDeletedFlag() + } + + err = c.store.Put(ctx, versionKey.ToDS(), []byte(c.schemaVersionKey.SchemaVersionId)) if err != nil { return err } - if !exists { - // write object marker + + if !hasObjectMarker { + // ensure object marker exists return c.store.Put(ctx, c.key.ToPrimaryDataStoreKey().ToDS(), []byte{base.ObjectMarker}) } diff --git a/core/crdt/lwwreg.go b/core/crdt/lwwreg.go index bc6806e857..9ff5ec266c 100644 --- a/core/crdt/lwwreg.go +++ b/core/crdt/lwwreg.go @@ -114,8 +114,6 @@ func (reg LWWRegister) Value(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - // ignore the first byte (CRDT Type marker) from the returned value - buf = buf[1:] return buf, nil } @@ -187,9 +185,7 @@ func (reg LWWRegister) setValue(ctx context.Context, val []byte, priority uint64 } } - // prepend the value byte array with a single byte indicator for the CRDT Type. - buf := append([]byte{byte(client.LWW_REGISTER)}, val...) - err = reg.store.Put(ctx, key.ToDS(), buf) + err = reg.store.Put(ctx, key.ToDS(), val) if err != nil { return NewErrFailedToStoreValue(err) } diff --git a/core/data.go b/core/data.go index aee4cf64ed..a756d41f91 100644 --- a/core/data.go +++ b/core/data.go @@ -156,12 +156,6 @@ func NewSpans(spans ...Span) Spans { } } -// KeyValue is a KV store response containing the resulting core.Key and byte array value. -type KeyValue struct { - Key DataStoreKey - Value []byte -} - // HeadKeyValue is a KV store response containing the resulting core.HeadStoreKey // and byte array value. type HeadKeyValue struct { diff --git a/core/doc.go b/core/doc.go index 6966a8db4d..8f6700f50c 100644 --- a/core/doc.go +++ b/core/doc.go @@ -34,6 +34,9 @@ type Doc struct { Fields DocFields Status client.DocumentStatus + // The id of the schema version that this document is currently at. This includes + // any migrations that may have been run. + SchemaVersionID string } // GetKey returns the DocKey for this document. @@ -278,17 +281,5 @@ func (mapping *DocumentMapping) TryToFindNameFromIndex(targetIndex int) (string, } } - // Try to find the name of this index in the ChildMappings. - for _, childMapping := range mapping.ChildMappings { - if childMapping == nil { - continue - } - - name, found := childMapping.TryToFindNameFromIndex(targetIndex) - if found { - return name, true - } - } - return "", false } diff --git a/core/encoding.go b/core/encoding.go new file mode 100644 index 0000000000..9482acefbf --- /dev/null +++ b/core/encoding.go @@ -0,0 +1,170 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package core + +import ( + "fmt" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" +) + +// DecodeFieldValue takes a field value and description and converts it to the +// standardized Defra Go type. 
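+//
+// For example, a CBOR-decoded int64(3) destined for a FieldKind_FLOAT field is
+// returned as float64(3), and []any inputs are converted to typed slices such
+// as []int64 or []string according to the field's array kind.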
+func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) { + if val == nil { + return nil, nil + } + + var err error + if array, isArray := val.([]any); isArray { + var ok bool + switch fieldDesc.Kind { + case client.FieldKind_BOOL_ARRAY: + boolArray := make([]bool, len(array)) + for i, untypedValue := range array { + boolArray[i], ok = untypedValue.(bool) + if !ok { + return nil, client.NewErrUnexpectedType[bool](fieldDesc.Name, untypedValue) + } + } + val = boolArray + + case client.FieldKind_NILLABLE_BOOL_ARRAY: + val, err = convertNillableArray[bool](fieldDesc.Name, array) + if err != nil { + return nil, err + } + + case client.FieldKind_INT_ARRAY: + intArray := make([]int64, len(array)) + for i, untypedValue := range array { + intArray[i], err = convertToInt(fmt.Sprintf("%s[%v]", fieldDesc.Name, i), untypedValue) + if err != nil { + return nil, err + } + } + val = intArray + + case client.FieldKind_NILLABLE_INT_ARRAY: + val, err = convertNillableArrayWithConverter(fieldDesc.Name, array, convertToInt) + if err != nil { + return nil, err + } + + case client.FieldKind_FLOAT_ARRAY: + floatArray := make([]float64, len(array)) + for i, untypedValue := range array { + floatArray[i], ok = untypedValue.(float64) + if !ok { + return nil, client.NewErrUnexpectedType[float64](fieldDesc.Name, untypedValue) + } + } + val = floatArray + + case client.FieldKind_NILLABLE_FLOAT_ARRAY: + val, err = convertNillableArray[float64](fieldDesc.Name, array) + if err != nil { + return nil, err + } + + case client.FieldKind_STRING_ARRAY: + stringArray := make([]string, len(array)) + for i, untypedValue := range array { + stringArray[i], ok = untypedValue.(string) + if !ok { + return nil, client.NewErrUnexpectedType[string](fieldDesc.Name, untypedValue) + } + } + val = stringArray + + case client.FieldKind_NILLABLE_STRING_ARRAY: + val, err = convertNillableArray[string](fieldDesc.Name, array) + if err != nil { + return nil, err + } + } + } else { // CBOR often encodes values typed as floats as ints + switch fieldDesc.Kind { + case client.FieldKind_FLOAT: + switch v := val.(type) { + case int64: + return float64(v), nil + case int: + return float64(v), nil + case uint64: + return float64(v), nil + case uint: + return float64(v), nil + } + case client.FieldKind_INT: + switch v := val.(type) { + case float64: + if v >= 0 { + return uint64(v), nil + } + return int64(v), nil + } + } + } + + return val, nil +} + +func convertNillableArray[T any](propertyName string, items []any) ([]immutable.Option[T], error) { + resultArray := make([]immutable.Option[T], len(items)) + for i, untypedValue := range items { + if untypedValue == nil { + resultArray[i] = immutable.None[T]() + continue + } + value, ok := untypedValue.(T) + if !ok { + return nil, client.NewErrUnexpectedType[T](fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) + } + resultArray[i] = immutable.Some(value) + } + return resultArray, nil +} + +func convertNillableArrayWithConverter[TOut any]( + propertyName string, + items []any, + converter func(propertyName string, in any) (TOut, error), +) ([]immutable.Option[TOut], error) { + resultArray := make([]immutable.Option[TOut], len(items)) + for i, untypedValue := range items { + if untypedValue == nil { + resultArray[i] = immutable.None[TOut]() + continue + } + value, err := converter(fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) + if err != nil { + return nil, err + } + resultArray[i] = immutable.Some(value) + } + return resultArray, nil +} + +func convertToInt(propertyName 
string, untypedValue any) (int64, error) { + switch value := untypedValue.(type) { + case uint64: + return int64(value), nil + case int64: + return value, nil + case float64: + return int64(value), nil + default: + return 0, client.NewErrUnexpectedType[string](propertyName, untypedValue) + } +} diff --git a/core/key.go b/core/key.go index 756290a607..a8ec5ece2b 100644 --- a/core/key.go +++ b/core/key.go @@ -41,13 +41,17 @@ const ( ) const ( - COLLECTION = "/collection/names" - COLLECTION_SCHEMA = "/collection/schema" - COLLECTION_SCHEMA_VERSION = "/collection/version" - SEQ = "/seq" - PRIMARY_KEY = "/pk" - REPLICATOR = "/replicator/id" - P2P_COLLECTION = "/p2p/collection" + COLLECTION = "/collection/names" + COLLECTION_SCHEMA = "/collection/schema" + COLLECTION_SCHEMA_VERSION = "/collection/version/v" + COLLECTION_SCHEMA_VERSION_HISTORY = "/collection/version/h" + COLLECTION_INDEX = "/collection/index" + SCHEMA_MIGRATION = "/schema/migration" + SEQ = "/seq" + PRIMARY_KEY = "/pk" + DATASTORE_DOC_VERSION_FIELD_ID = "v" + REPLICATOR = "/replicator/id" + P2P_COLLECTION = "/p2p/collection" ) // Key is an interface that represents a key in the database. @@ -67,6 +71,18 @@ type DataStoreKey struct { var _ Key = (*DataStoreKey)(nil) +// IndexDataStoreKey is key of an indexed document in the database. +type IndexDataStoreKey struct { + // CollectionID is the id of the collection + CollectionID uint32 + // IndexID is the id of the index + IndexID uint32 + // FieldValues is the values of the fields in the index + FieldValues [][]byte +} + +var _ Key = (*IndexDataStoreKey)(nil) + type PrimaryDataStoreKey struct { CollectionId string DocKey string @@ -106,6 +122,37 @@ type CollectionSchemaVersionKey struct { var _ Key = (*CollectionSchemaVersionKey)(nil) +// CollectionIndexKey to a stored description of an index +type CollectionIndexKey struct { + // CollectionName is the name of the collection that the index is on + CollectionName string + // IndexName is the name of the index + IndexName string +} + +var _ Key = (*CollectionIndexKey)(nil) + +// SchemaHistoryKey holds the pathway through the schema version history for +// any given schema. +// +// The key points to the schema version id of the next version of the schema. +// If a SchemaHistoryKey does not exist for a given SchemaVersionID it means +// that that SchemaVersionID is for the latest version. +type SchemaHistoryKey struct { + SchemaID string + PreviousSchemaVersionID string +} + +var _ Key = (*SchemaHistoryKey)(nil) + +// SchemaVersionMigrationKey points to the jsonified configuration of a lens migration +// for the given source schema version id. +type SchemaVersionMigrationKey struct { + SourceSchemaVersionID string +} + +var _ Key = (*SchemaVersionMigrationKey)(nil) + type P2PCollectionKey struct { CollectionID string } @@ -210,6 +257,80 @@ func NewCollectionSchemaVersionKey(schemaVersionId string) CollectionSchemaVersi return CollectionSchemaVersionKey{SchemaVersionId: schemaVersionId} } +// NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. +func NewCollectionIndexKey(colID, indexName string) CollectionIndexKey { + return CollectionIndexKey{CollectionName: colID, IndexName: indexName} +} + +// NewCollectionIndexKeyFromString creates a new CollectionIndexKey from a string. +// It expects the input string is in the following format: +// +// /collection/index/[CollectionName]/[IndexName] +// +// Where [IndexName] might be omitted. Anything else will return an error. 
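+//
+// For example, "/collection/index/col/idx" yields a CollectionIndexKey with
+// CollectionName "col" and IndexName "idx", while "/collection/index/col"
+// yields one with an empty IndexName.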
+func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { + keyArr := strings.Split(key, "/") + if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != "collection" || keyArr[2] != "index" { + return CollectionIndexKey{}, ErrInvalidKey + } + result := CollectionIndexKey{CollectionName: keyArr[3]} + if len(keyArr) == 5 { + result.IndexName = keyArr[4] + } + return result, nil +} + +// ToString returns the string representation of the key +// It is in the following format: +// /collection/index/[CollectionName]/[IndexName] +// if [CollectionName] is empty, the rest is ignored +func (k CollectionIndexKey) ToString() string { + result := COLLECTION_INDEX + + if k.CollectionName != "" { + result = result + "/" + k.CollectionName + if k.IndexName != "" { + result = result + "/" + k.IndexName + } + } + + return result +} + +// Bytes returns the byte representation of the key +func (k CollectionIndexKey) Bytes() []byte { + return []byte(k.ToString()) +} + +// ToDS returns the datastore key +func (k CollectionIndexKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func NewSchemaHistoryKey(schemaId string, previousSchemaVersionID string) SchemaHistoryKey { + return SchemaHistoryKey{ + SchemaID: schemaId, + PreviousSchemaVersionID: previousSchemaVersionID, + } +} + +func NewSchemaVersionMigrationKey(schemaVersionID string) SchemaVersionMigrationKey { + return SchemaVersionMigrationKey{SourceSchemaVersionID: schemaVersionID} +} + +func NewSchemaHistoryKeyFromString(keyString string) (SchemaHistoryKey, error) { + keyString = strings.TrimPrefix(keyString, COLLECTION_SCHEMA_VERSION_HISTORY+"/") + elements := strings.Split(keyString, "/") + if len(elements) != 2 { + return SchemaHistoryKey{}, ErrInvalidKey + } + + return SchemaHistoryKey{ + SchemaID: elements[0], + PreviousSchemaVersionID: elements[1], + }, nil +} + func NewSequenceKey(name string) SequenceKey { return SequenceKey{SequenceName: name} } @@ -318,6 +439,109 @@ func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { } } +// NewIndexDataStoreKey creates a new IndexDataStoreKey from a string. +// It expects the input string is in the following format: +// +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) 
+// +// Where [CollectionID] and [IndexID] are integers +func NewIndexDataStoreKey(key string) (IndexDataStoreKey, error) { + if key == "" { + return IndexDataStoreKey{}, ErrEmptyKey + } + + if !strings.HasPrefix(key, "/") { + return IndexDataStoreKey{}, ErrInvalidKey + } + + elements := strings.Split(key[1:], "/") + + // With less than 3 elements, we know it's an invalid key + if len(elements) < 3 { + return IndexDataStoreKey{}, ErrInvalidKey + } + + colID, err := strconv.Atoi(elements[0]) + if err != nil { + return IndexDataStoreKey{}, ErrInvalidKey + } + + indexKey := IndexDataStoreKey{CollectionID: uint32(colID)} + + indID, err := strconv.Atoi(elements[1]) + if err != nil { + return IndexDataStoreKey{}, ErrInvalidKey + } + indexKey.IndexID = uint32(indID) + + // first 2 elements are the collection and index IDs, the rest are field values + for i := 2; i < len(elements); i++ { + indexKey.FieldValues = append(indexKey.FieldValues, []byte(elements[i])) + } + + return indexKey, nil +} + +// Bytes returns the byte representation of the key +func (k *IndexDataStoreKey) Bytes() []byte { + return []byte(k.ToString()) +} + +// ToDS returns the datastore key +func (k *IndexDataStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +// ToString returns the string representation of the key +// It is in the following format: +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) +// If while composing the string from left to right, a component +// is empty, the string is returned up to that point +func (k *IndexDataStoreKey) ToString() string { + sb := strings.Builder{} + + if k.CollectionID == 0 { + return "" + } + sb.WriteByte('/') + sb.WriteString(strconv.Itoa(int(k.CollectionID))) + + if k.IndexID == 0 { + return sb.String() + } + sb.WriteByte('/') + sb.WriteString(strconv.Itoa(int(k.IndexID))) + + for _, v := range k.FieldValues { + if len(v) == 0 { + break + } + sb.WriteByte('/') + sb.WriteString(string(v)) + } + + return sb.String() +} + +// Equal returns true if the two keys are equal +func (k IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { + if k.CollectionID != other.CollectionID { + return false + } + if k.IndexID != other.IndexID { + return false + } + if len(k.FieldValues) != len(other.FieldValues) { + return false + } + for i := range k.FieldValues { + if string(k.FieldValues[i]) != string(other.FieldValues[i]) { + return false + } + } + return true +} + func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { return DataStoreKey{ CollectionID: k.CollectionId, @@ -401,6 +625,46 @@ func (k CollectionSchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } +func (k SchemaHistoryKey) ToString() string { + result := COLLECTION_SCHEMA_VERSION_HISTORY + + if k.SchemaID != "" { + result = result + "/" + k.SchemaID + } + + if k.PreviousSchemaVersionID != "" { + result = result + "/" + k.PreviousSchemaVersionID + } + + return result +} + +func (k SchemaHistoryKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaHistoryKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k SchemaVersionMigrationKey) ToString() string { + result := SCHEMA_MIGRATION + + if k.SourceSchemaVersionID != "" { + result = result + "/" + k.SourceSchemaVersionID + } + + return result +} + +func (k SchemaVersionMigrationKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaVersionMigrationKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + func (k SequenceKey) ToString() string { result := SEQ diff --git a/core/key_test.go 
b/core/key_test.go index 865ece9c26..d22498bd8c 100644 --- a/core/key_test.go +++ b/core/key_test.go @@ -13,6 +13,7 @@ package core import ( "testing" + ds "github.com/ipfs/go-datastore" "github.com/stretchr/testify/assert" ) @@ -107,3 +108,316 @@ func TestNewDataStoreKey_GivenAStringWithExtraSuffix(t *testing.T) { assert.ErrorIs(t, ErrInvalidKey, err) } + +func TestNewIndexKey_IfEmptyParam_ReturnPrefix(t *testing.T) { + key := NewCollectionIndexKey("", "") + assert.Equal(t, "/collection/index", key.ToString()) +} + +func TestNewIndexKey_IfParamsAreGiven_ReturnFullKey(t *testing.T) { + key := NewCollectionIndexKey("col", "idx") + assert.Equal(t, "/collection/index/col/idx", key.ToString()) +} + +func TestNewIndexKey_InNoCollectionName_ReturnJustPrefix(t *testing.T) { + key := NewCollectionIndexKey("", "idx") + assert.Equal(t, "/collection/index", key.ToString()) +} + +func TestNewIndexKey_InNoIndexName_ReturnWithoutIndexName(t *testing.T) { + key := NewCollectionIndexKey("col", "") + assert.Equal(t, "/collection/index/col", key.ToString()) +} + +func TestNewIndexKeyFromString_IfInvalidString_ReturnError(t *testing.T) { + for _, key := range []string{ + "", + "/collection", + "/collection/index", + "/collection/index/col/idx/extra", + "/wrong/index/col/idx", + "/collection/wrong/col/idx", + } { + _, err := NewCollectionIndexKeyFromString(key) + assert.ErrorIs(t, err, ErrInvalidKey) + } +} + +func TestNewIndexKeyFromString_IfOnlyCollectionName_ReturnKey(t *testing.T) { + key, err := NewCollectionIndexKeyFromString("/collection/index/col") + assert.NoError(t, err) + assert.Equal(t, key.CollectionName, "col") + assert.Equal(t, key.IndexName, "") +} + +func TestNewIndexKeyFromString_IfFullKeyString_ReturnKey(t *testing.T) { + key, err := NewCollectionIndexKeyFromString("/collection/index/col/idx") + assert.NoError(t, err) + assert.Equal(t, key.CollectionName, "col") + assert.Equal(t, key.IndexName, "idx") +} + +func toFieldValues(values ...string) [][]byte { + var result [][]byte = make([][]byte, 0, len(values)) + for _, value := range values { + result = append(result, []byte(value)) + } + return result +} + +func TestIndexDatastoreKey_ToString(t *testing.T) { + cases := []struct { + Key IndexDataStoreKey + Expected string + }{ + { + Key: IndexDataStoreKey{}, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + }, + Expected: "/1", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + Expected: "/1/2/3", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + Expected: "/1/2/3/4", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + FieldValues: toFieldValues("3"), + }, + Expected: "/1", + }, + { + Key: IndexDataStoreKey{ + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + FieldValues: toFieldValues("3"), + }, + Expected: "", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("", ""), + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("", "3"), + }, + Expected: "/1/2", + }, + { + Key: IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "", "4"), + }, + Expected: "/1/2/3", + }, + } + for i, c := range cases { + assert.Equal(t, c.Key.ToString(), c.Expected, "case %d", 
i) + } +} + +func TestIndexDatastoreKey_Bytes(t *testing.T) { + key := IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + } + assert.Equal(t, key.Bytes(), []byte("/1/2/3/4")) +} + +func TestIndexDatastoreKey_ToDS(t *testing.T) { + key := IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + } + assert.Equal(t, key.ToDS(), ds.NewKey("/1/2/3/4")) +} + +func TestIndexDatastoreKey_EqualTrue(t *testing.T) { + cases := [][]IndexDataStoreKey{ + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + }, + { + CollectionID: 1, + }, + }, + } + + for i, c := range cases { + assert.True(t, c[0].Equal(c[1]), "case %d", i) + } +} + +func TestCollectionIndexKey_Bytes(t *testing.T) { + key := CollectionIndexKey{ + CollectionName: "col", + IndexName: "idx", + } + assert.Equal(t, []byte(COLLECTION_INDEX+"/col/idx"), key.Bytes()) +} + +func TestIndexDatastoreKey_EqualFalse(t *testing.T) { + cases := [][]IndexDataStoreKey{ + { + { + CollectionID: 1, + }, + { + CollectionID: 2, + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + }, + { + CollectionID: 1, + IndexID: 3, + }, + }, + { + { + CollectionID: 1, + }, + { + IndexID: 1, + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("4", "3"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }, + { + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }, + }, + { + { + CollectionID: 1, + FieldValues: toFieldValues("3", "", "4"), + }, + { + CollectionID: 1, + FieldValues: toFieldValues("3", "4"), + }, + }, + } + + for i, c := range cases { + assert.False(t, c[0].Equal(c[1]), "case %d", i) + } +} + +func TestNewIndexDataStoreKey_ValidKey(t *testing.T) { + str, err := NewIndexDataStoreKey("/1/2/3") + assert.NoError(t, err) + assert.Equal(t, str, IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3"), + }) + + str, err = NewIndexDataStoreKey("/1/2/3/4") + assert.NoError(t, err) + assert.Equal(t, str, IndexDataStoreKey{ + CollectionID: 1, + IndexID: 2, + FieldValues: toFieldValues("3", "4"), + }) +} + +func TestNewIndexDataStoreKey_InvalidKey(t *testing.T) { + keys := []string{ + "", + "/", + "/1", + "/1/2", + " /1/2/3", + "1/2/3", + "/a/2/3", + "/1/b/3", + } + for i, key := range keys { + _, err := NewIndexDataStoreKey(key) + assert.Error(t, err, "case %d: %s", i, key) + } +} diff --git a/core/net/protocol.go b/core/net/protocol.go index c405f95f25..82024bde81 100644 --- a/core/net/protocol.go +++ b/core/net/protocol.go @@ -15,7 +15,7 @@ import ( ma "github.com/multiformats/go-multiaddr" ) -// DefraDB's p2p protocol information (https://docs.libp2p.io/concepts/protocols/). +// DefraDB's P2P protocol information (https://docs.libp2p.io/concepts/protocols/). const ( // Name is the protocol slug, the codename representing it. 
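To illustrate the schema history and migration keys added to core/key.go above, a minimal sketch follows; the schema and version ids are placeholder strings, and the import path is assumed to be the repository's core package.

    package main

    import (
    	"fmt"

    	"github.com/sourcenetwork/defradb/core"
    )

    func main() {
    	// A history entry is addressed by the schema id plus the previous schema version id.
    	history := core.NewSchemaHistoryKey("schemaA", "versionOne")
    	fmt.Println(history.ToString()) // /collection/version/h/schemaA/versionOne

    	// The string form round-trips back into a SchemaHistoryKey.
    	parsed, err := core.NewSchemaHistoryKeyFromString(history.ToString())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(parsed.SchemaID, parsed.PreviousSchemaVersionID) // schemaA versionOne

    	// A lens migration configuration is keyed by its source schema version id.
    	migration := core.NewSchemaVersionMigrationKey("versionOne")
    	fmt.Println(migration.ToString()) // /schema/migration/versionOne
    }
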
diff --git a/datastore/blockstore_test.go b/datastore/blockstore_test.go index b3861cb7dc..81e086c99f 100644 --- a/datastore/blockstore_test.go +++ b/datastore/blockstore_test.go @@ -17,9 +17,10 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" + ccid "github.com/sourcenetwork/defradb/core/cid" + "github.com/sourcenetwork/defradb/datastore/memory" ) @@ -28,20 +29,6 @@ var ( data2 = []byte("SourceHub") ) -// Adding this here to avoid circular dependency datastore->core->datastore. -// The culprit is `core.Parser`. -func newSHA256CidV1(data []byte) (cid.Cid, error) { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - // And then feed it some data - return pref.Sum(data) -} - func TestBStoreGet(t *testing.T) { ctx := context.Background() rootstore := memory.NewDatastore(ctx) @@ -51,7 +38,7 @@ func TestBStoreGet(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -73,7 +60,7 @@ func TestBStoreGetWithUndefinedCID(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -93,7 +80,7 @@ func TestBStoreGetWithStoreClosed(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -118,7 +105,7 @@ func TestBStoreGetWithReHash(t *testing.T) { bs.HashOnRead(true) - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -140,12 +127,12 @@ func TestPutMany(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) @@ -163,7 +150,7 @@ func TestPutManyWithExists(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) @@ -171,7 +158,7 @@ func TestPutManyWithExists(t *testing.T) { err = bs.Put(ctx, b) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) @@ -189,12 +176,12 @@ func TestPutManyWithStoreClosed(t *testing.T) { store: dsRW, } - cID, err := newSHA256CidV1(data) + cID, err := ccid.NewSHA256CidV1(data) require.NoError(t, err) b, err := blocks.NewBlockWithCid(data, cID) require.NoError(t, err) - cID2, err := newSHA256CidV1(data2) + cID2, err := ccid.NewSHA256CidV1(data2) require.NoError(t, err) b2, err := blocks.NewBlockWithCid(data2, cID2) require.NoError(t, err) diff --git a/datastore/mocks/DAGStore.go b/datastore/mocks/DAGStore.go new file mode 100644 index 0000000000..1ca7d96d7b --- /dev/null +++ b/datastore/mocks/DAGStore.go @@ -0,0 +1,416 @@ +// Code generated 
by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// DAGStore is an autogenerated mock type for the DAGStore type +type DAGStore struct { + mock.Mock +} + +type DAGStore_Expecter struct { + mock *mock.Mock +} + +func (_m *DAGStore) EXPECT() *DAGStore_Expecter { + return &DAGStore_Expecter{mock: &_m.Mock} +} + +// AllKeysChan provides a mock function with given fields: ctx +func (_m *DAGStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ret := _m.Called(ctx) + + var r0 <-chan cid.Cid + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan cid.Cid, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan cid.Cid); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan cid.Cid) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_AllKeysChan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllKeysChan' +type DAGStore_AllKeysChan_Call struct { + *mock.Call +} + +// AllKeysChan is a helper method to define mock.On call +// - ctx context.Context +func (_e *DAGStore_Expecter) AllKeysChan(ctx interface{}) *DAGStore_AllKeysChan_Call { + return &DAGStore_AllKeysChan_Call{Call: _e.mock.On("AllKeysChan", ctx)} +} + +func (_c *DAGStore_AllKeysChan_Call) Run(run func(ctx context.Context)) *DAGStore_AllKeysChan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *DAGStore_AllKeysChan_Call) Return(_a0 <-chan cid.Cid, _a1 error) *DAGStore_AllKeysChan_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_AllKeysChan_Call) RunAndReturn(run func(context.Context) (<-chan cid.Cid, error)) *DAGStore_AllKeysChan_Call { + _c.Call.Return(run) + return _c +} + +// DeleteBlock provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) DeleteBlock(_a0 context.Context, _a1 cid.Cid) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_DeleteBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteBlock' +type DAGStore_DeleteBlock_Call struct { + *mock.Call +} + +// DeleteBlock is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) DeleteBlock(_a0 interface{}, _a1 interface{}) *DAGStore_DeleteBlock_Call { + return &DAGStore_DeleteBlock_Call{Call: _e.mock.On("DeleteBlock", _a0, _a1)} +} + +func (_c *DAGStore_DeleteBlock_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_DeleteBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_DeleteBlock_Call) Return(_a0 error) *DAGStore_DeleteBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_DeleteBlock_Call) RunAndReturn(run func(context.Context, cid.Cid) error) *DAGStore_DeleteBlock_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Get(_a0 context.Context, _a1 cid.Cid) (blocks.Block, error) { + ret := _m.Called(_a0, _a1) + + var r0 
blocks.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blocks.Block); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blocks.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type DAGStore_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) Get(_a0 interface{}, _a1 interface{}) *DAGStore_Get_Call { + return &DAGStore_Get_Call{Call: _e.mock.On("Get", _a0, _a1)} +} + +func (_c *DAGStore_Get_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_Get_Call) Return(_a0 blocks.Block, _a1 error) *DAGStore_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_Get_Call) RunAndReturn(run func(context.Context, cid.Cid) (blocks.Block, error)) *DAGStore_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) GetSize(_a0 context.Context, _a1 cid.Cid) (int, error) { + ret := _m.Called(_a0, _a1) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (int, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) int); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_GetSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSize' +type DAGStore_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) GetSize(_a0 interface{}, _a1 interface{}) *DAGStore_GetSize_Call { + return &DAGStore_GetSize_Call{Call: _e.mock.On("GetSize", _a0, _a1)} +} + +func (_c *DAGStore_GetSize_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_GetSize_Call) Return(_a0 int, _a1 error) *DAGStore_GetSize_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_GetSize_Call) RunAndReturn(run func(context.Context, cid.Cid) (int, error)) *DAGStore_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Has(_a0 context.Context, _a1 cid.Cid) (bool, error) { + ret := _m.Called(_a0, _a1) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (bool, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DAGStore_Has_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type DAGStore_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 cid.Cid +func (_e *DAGStore_Expecter) Has(_a0 interface{}, _a1 interface{}) *DAGStore_Has_Call { + return &DAGStore_Has_Call{Call: _e.mock.On("Has", _a0, _a1)} +} + +func (_c *DAGStore_Has_Call) Run(run func(_a0 context.Context, _a1 cid.Cid)) *DAGStore_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(cid.Cid)) + }) + return _c +} + +func (_c *DAGStore_Has_Call) Return(_a0 bool, _a1 error) *DAGStore_Has_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DAGStore_Has_Call) RunAndReturn(run func(context.Context, cid.Cid) (bool, error)) *DAGStore_Has_Call { + _c.Call.Return(run) + return _c +} + +// HashOnRead provides a mock function with given fields: enabled +func (_m *DAGStore) HashOnRead(enabled bool) { + _m.Called(enabled) +} + +// DAGStore_HashOnRead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HashOnRead' +type DAGStore_HashOnRead_Call struct { + *mock.Call +} + +// HashOnRead is a helper method to define mock.On call +// - enabled bool +func (_e *DAGStore_Expecter) HashOnRead(enabled interface{}) *DAGStore_HashOnRead_Call { + return &DAGStore_HashOnRead_Call{Call: _e.mock.On("HashOnRead", enabled)} +} + +func (_c *DAGStore_HashOnRead_Call) Run(run func(enabled bool)) *DAGStore_HashOnRead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(bool)) + }) + return _c +} + +func (_c *DAGStore_HashOnRead_Call) Return() *DAGStore_HashOnRead_Call { + _c.Call.Return() + return _c +} + +func (_c *DAGStore_HashOnRead_Call) RunAndReturn(run func(bool)) *DAGStore_HashOnRead_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) Put(_a0 context.Context, _a1 blocks.Block) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blocks.Block) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type DAGStore_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 blocks.Block +func (_e *DAGStore_Expecter) Put(_a0 interface{}, _a1 interface{}) *DAGStore_Put_Call { + return &DAGStore_Put_Call{Call: _e.mock.On("Put", _a0, _a1)} +} + +func (_c *DAGStore_Put_Call) Run(run func(_a0 context.Context, _a1 blocks.Block)) *DAGStore_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(blocks.Block)) + }) + return _c +} + +func (_c *DAGStore_Put_Call) Return(_a0 error) *DAGStore_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_Put_Call) RunAndReturn(run func(context.Context, blocks.Block) error) *DAGStore_Put_Call { + _c.Call.Return(run) + return _c +} + +// PutMany provides a mock function with given fields: _a0, _a1 +func (_m *DAGStore) PutMany(_a0 context.Context, _a1 []blocks.Block) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []blocks.Block) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DAGStore_PutMany_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'PutMany' +type DAGStore_PutMany_Call struct { + *mock.Call +} + +// PutMany is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []blocks.Block +func (_e *DAGStore_Expecter) PutMany(_a0 interface{}, _a1 interface{}) *DAGStore_PutMany_Call { + return &DAGStore_PutMany_Call{Call: _e.mock.On("PutMany", _a0, _a1)} +} + +func (_c *DAGStore_PutMany_Call) Run(run func(_a0 context.Context, _a1 []blocks.Block)) *DAGStore_PutMany_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]blocks.Block)) + }) + return _c +} + +func (_c *DAGStore_PutMany_Call) Return(_a0 error) *DAGStore_PutMany_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DAGStore_PutMany_Call) RunAndReturn(run func(context.Context, []blocks.Block) error) *DAGStore_PutMany_Call { + _c.Call.Return(run) + return _c +} + +// NewDAGStore creates a new instance of DAGStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDAGStore(t interface { + mock.TestingT + Cleanup(func()) +}) *DAGStore { + mock := &DAGStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/DSReaderWriter.go b/datastore/mocks/DSReaderWriter.go new file mode 100644 index 0000000000..3d822f6d2c --- /dev/null +++ b/datastore/mocks/DSReaderWriter.go @@ -0,0 +1,399 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + datastore "github.com/ipfs/go-datastore" + + iterable "github.com/sourcenetwork/defradb/datastore/iterable" + + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// DSReaderWriter is an autogenerated mock type for the DSReaderWriter type +type DSReaderWriter struct { + mock.Mock +} + +type DSReaderWriter_Expecter struct { + mock *mock.Mock +} + +func (_m *DSReaderWriter) EXPECT() *DSReaderWriter_Expecter { + return &DSReaderWriter_Expecter{mock: &_m.Mock} +} + +// Delete provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Delete(ctx context.Context, key datastore.Key) error { + ret := _m.Called(ctx, key) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DSReaderWriter_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type DSReaderWriter_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Delete(ctx interface{}, key interface{}) *DSReaderWriter_Delete_Call { + return &DSReaderWriter_Delete_Call{Call: _e.mock.On("Delete", ctx, key)} +} + +func (_c *DSReaderWriter_Delete_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Delete_Call) Return(_a0 error) *DSReaderWriter_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DSReaderWriter_Delete_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *DSReaderWriter_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Get(ctx 
context.Context, key datastore.Key) ([]byte, error) { + ret := _m.Called(ctx, key) + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) []byte); ok { + r0 = rf(ctx, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type DSReaderWriter_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Get(ctx interface{}, key interface{}) *DSReaderWriter_Get_Call { + return &DSReaderWriter_Get_Call{Call: _e.mock.On("Get", ctx, key)} +} + +func (_c *DSReaderWriter_Get_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Get_Call) Return(value []byte, err error) *DSReaderWriter_Get_Call { + _c.Call.Return(value, err) + return _c +} + +func (_c *DSReaderWriter_Get_Call) RunAndReturn(run func(context.Context, datastore.Key) ([]byte, error)) *DSReaderWriter_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetIterator provides a mock function with given fields: q +func (_m *DSReaderWriter) GetIterator(q query.Query) (iterable.Iterator, error) { + ret := _m.Called(q) + + var r0 iterable.Iterator + var r1 error + if rf, ok := ret.Get(0).(func(query.Query) (iterable.Iterator, error)); ok { + return rf(q) + } + if rf, ok := ret.Get(0).(func(query.Query) iterable.Iterator); ok { + r0 = rf(q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(iterable.Iterator) + } + } + + if rf, ok := ret.Get(1).(func(query.Query) error); ok { + r1 = rf(q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_GetIterator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetIterator' +type DSReaderWriter_GetIterator_Call struct { + *mock.Call +} + +// GetIterator is a helper method to define mock.On call +// - q query.Query +func (_e *DSReaderWriter_Expecter) GetIterator(q interface{}) *DSReaderWriter_GetIterator_Call { + return &DSReaderWriter_GetIterator_Call{Call: _e.mock.On("GetIterator", q)} +} + +func (_c *DSReaderWriter_GetIterator_Call) Run(run func(q query.Query)) *DSReaderWriter_GetIterator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(query.Query)) + }) + return _c +} + +func (_c *DSReaderWriter_GetIterator_Call) Return(_a0 iterable.Iterator, _a1 error) *DSReaderWriter_GetIterator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DSReaderWriter_GetIterator_Call) RunAndReturn(run func(query.Query) (iterable.Iterator, error)) *DSReaderWriter_GetIterator_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) GetSize(ctx context.Context, key datastore.Key) (int, error) { + ret := _m.Called(ctx, key) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, 
datastore.Key) int); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_GetSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSize' +type DSReaderWriter_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) GetSize(ctx interface{}, key interface{}) *DSReaderWriter_GetSize_Call { + return &DSReaderWriter_GetSize_Call{Call: _e.mock.On("GetSize", ctx, key)} +} + +func (_c *DSReaderWriter_GetSize_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_GetSize_Call) Return(size int, err error) *DSReaderWriter_GetSize_Call { + _c.Call.Return(size, err) + return _c +} + +func (_c *DSReaderWriter_GetSize_Call) RunAndReturn(run func(context.Context, datastore.Key) (int, error)) *DSReaderWriter_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: ctx, key +func (_m *DSReaderWriter) Has(ctx context.Context, key datastore.Key) (bool, error) { + ret := _m.Called(ctx, key) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) bool); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Has_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type DSReaderWriter_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *DSReaderWriter_Expecter) Has(ctx interface{}, key interface{}) *DSReaderWriter_Has_Call { + return &DSReaderWriter_Has_Call{Call: _e.mock.On("Has", ctx, key)} +} + +func (_c *DSReaderWriter_Has_Call) Run(run func(ctx context.Context, key datastore.Key)) *DSReaderWriter_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *DSReaderWriter_Has_Call) Return(exists bool, err error) *DSReaderWriter_Has_Call { + _c.Call.Return(exists, err) + return _c +} + +func (_c *DSReaderWriter_Has_Call) RunAndReturn(run func(context.Context, datastore.Key) (bool, error)) *DSReaderWriter_Has_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: ctx, key, value +func (_m *DSReaderWriter) Put(ctx context.Context, key datastore.Key, value []byte) error { + ret := _m.Called(ctx, key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { + r0 = rf(ctx, key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DSReaderWriter_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type DSReaderWriter_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - ctx context.Context +// - key 
datastore.Key +// - value []byte +func (_e *DSReaderWriter_Expecter) Put(ctx interface{}, key interface{}, value interface{}) *DSReaderWriter_Put_Call { + return &DSReaderWriter_Put_Call{Call: _e.mock.On("Put", ctx, key, value)} +} + +func (_c *DSReaderWriter_Put_Call) Run(run func(ctx context.Context, key datastore.Key, value []byte)) *DSReaderWriter_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key), args[2].([]byte)) + }) + return _c +} + +func (_c *DSReaderWriter_Put_Call) Return(_a0 error) *DSReaderWriter_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DSReaderWriter_Put_Call) RunAndReturn(run func(context.Context, datastore.Key, []byte) error) *DSReaderWriter_Put_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: ctx, q +func (_m *DSReaderWriter) Query(ctx context.Context, q query.Query) (query.Results, error) { + ret := _m.Called(ctx, q) + + var r0 query.Results + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, query.Query) query.Results); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(query.Results) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, query.Query) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DSReaderWriter_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type DSReaderWriter_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +// - ctx context.Context +// - q query.Query +func (_e *DSReaderWriter_Expecter) Query(ctx interface{}, q interface{}) *DSReaderWriter_Query_Call { + return &DSReaderWriter_Query_Call{Call: _e.mock.On("Query", ctx, q)} +} + +func (_c *DSReaderWriter_Query_Call) Run(run func(ctx context.Context, q query.Query)) *DSReaderWriter_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(query.Query)) + }) + return _c +} + +func (_c *DSReaderWriter_Query_Call) Return(_a0 query.Results, _a1 error) *DSReaderWriter_Query_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DSReaderWriter_Query_Call) RunAndReturn(run func(context.Context, query.Query) (query.Results, error)) *DSReaderWriter_Query_Call { + _c.Call.Return(run) + return _c +} + +// NewDSReaderWriter creates a new instance of DSReaderWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDSReaderWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *DSReaderWriter { + mock := &DSReaderWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/Results.go b/datastore/mocks/Results.go new file mode 100644 index 0000000000..69e19a420e --- /dev/null +++ b/datastore/mocks/Results.go @@ -0,0 +1,309 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
+ +package mocks + +import ( + goprocess "github.com/jbenet/goprocess" + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// Results is an autogenerated mock type for the Results type +type Results struct { + mock.Mock +} + +type Results_Expecter struct { + mock *mock.Mock +} + +func (_m *Results) EXPECT() *Results_Expecter { + return &Results_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *Results) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Results_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type Results_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *Results_Expecter) Close() *Results_Close_Call { + return &Results_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *Results_Close_Call) Run(run func()) *Results_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Close_Call) Return(_a0 error) *Results_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Close_Call) RunAndReturn(run func() error) *Results_Close_Call { + _c.Call.Return(run) + return _c +} + +// Next provides a mock function with given fields: +func (_m *Results) Next() <-chan query.Result { + ret := _m.Called() + + var r0 <-chan query.Result + if rf, ok := ret.Get(0).(func() <-chan query.Result); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan query.Result) + } + } + + return r0 +} + +// Results_Next_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Next' +type Results_Next_Call struct { + *mock.Call +} + +// Next is a helper method to define mock.On call +func (_e *Results_Expecter) Next() *Results_Next_Call { + return &Results_Next_Call{Call: _e.mock.On("Next")} +} + +func (_c *Results_Next_Call) Run(run func()) *Results_Next_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Next_Call) Return(_a0 <-chan query.Result) *Results_Next_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Next_Call) RunAndReturn(run func() <-chan query.Result) *Results_Next_Call { + _c.Call.Return(run) + return _c +} + +// NextSync provides a mock function with given fields: +func (_m *Results) NextSync() (query.Result, bool) { + ret := _m.Called() + + var r0 query.Result + var r1 bool + if rf, ok := ret.Get(0).(func() (query.Result, bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() query.Result); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(query.Result) + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Results_NextSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NextSync' +type Results_NextSync_Call struct { + *mock.Call +} + +// NextSync is a helper method to define mock.On call +func (_e *Results_Expecter) NextSync() *Results_NextSync_Call { + return &Results_NextSync_Call{Call: _e.mock.On("NextSync")} +} + +func (_c *Results_NextSync_Call) Run(run func()) *Results_NextSync_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_NextSync_Call) Return(_a0 query.Result, _a1 bool) *Results_NextSync_Call { + _c.Call.Return(_a0, _a1) 
+ return _c +} + +func (_c *Results_NextSync_Call) RunAndReturn(run func() (query.Result, bool)) *Results_NextSync_Call { + _c.Call.Return(run) + return _c +} + +// Process provides a mock function with given fields: +func (_m *Results) Process() goprocess.Process { + ret := _m.Called() + + var r0 goprocess.Process + if rf, ok := ret.Get(0).(func() goprocess.Process); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(goprocess.Process) + } + } + + return r0 +} + +// Results_Process_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Process' +type Results_Process_Call struct { + *mock.Call +} + +// Process is a helper method to define mock.On call +func (_e *Results_Expecter) Process() *Results_Process_Call { + return &Results_Process_Call{Call: _e.mock.On("Process")} +} + +func (_c *Results_Process_Call) Run(run func()) *Results_Process_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Process_Call) Return(_a0 goprocess.Process) *Results_Process_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Process_Call) RunAndReturn(run func() goprocess.Process) *Results_Process_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: +func (_m *Results) Query() query.Query { + ret := _m.Called() + + var r0 query.Query + if rf, ok := ret.Get(0).(func() query.Query); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(query.Query) + } + + return r0 +} + +// Results_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type Results_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +func (_e *Results_Expecter) Query() *Results_Query_Call { + return &Results_Query_Call{Call: _e.mock.On("Query")} +} + +func (_c *Results_Query_Call) Run(run func()) *Results_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Query_Call) Return(_a0 query.Query) *Results_Query_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Results_Query_Call) RunAndReturn(run func() query.Query) *Results_Query_Call { + _c.Call.Return(run) + return _c +} + +// Rest provides a mock function with given fields: +func (_m *Results) Rest() ([]query.Entry, error) { + ret := _m.Called() + + var r0 []query.Entry + var r1 error + if rf, ok := ret.Get(0).(func() ([]query.Entry, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []query.Entry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]query.Entry) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Results_Rest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rest' +type Results_Rest_Call struct { + *mock.Call +} + +// Rest is a helper method to define mock.On call +func (_e *Results_Expecter) Rest() *Results_Rest_Call { + return &Results_Rest_Call{Call: _e.mock.On("Rest")} +} + +func (_c *Results_Rest_Call) Run(run func()) *Results_Rest_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Results_Rest_Call) Return(_a0 []query.Entry, _a1 error) *Results_Rest_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Results_Rest_Call) RunAndReturn(run func() ([]query.Entry, error)) *Results_Rest_Call { + _c.Call.Return(run) + return _c +} + +// NewResults creates a new instance of 
Results. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewResults(t interface { + mock.TestingT + Cleanup(func()) +}) *Results { + mock := &Results{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/RootStore.go b/datastore/mocks/RootStore.go new file mode 100644 index 0000000000..96f9cb6256 --- /dev/null +++ b/datastore/mocks/RootStore.go @@ -0,0 +1,536 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + datastore "github.com/ipfs/go-datastore" + + mock "github.com/stretchr/testify/mock" + + query "github.com/ipfs/go-datastore/query" +) + +// RootStore is an autogenerated mock type for the RootStore type +type RootStore struct { + mock.Mock +} + +type RootStore_Expecter struct { + mock *mock.Mock +} + +func (_m *RootStore) EXPECT() *RootStore_Expecter { + return &RootStore_Expecter{mock: &_m.Mock} +} + +// Batch provides a mock function with given fields: ctx +func (_m *RootStore) Batch(ctx context.Context) (datastore.Batch, error) { + ret := _m.Called(ctx) + + var r0 datastore.Batch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (datastore.Batch, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) datastore.Batch); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Batch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Batch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Batch' +type RootStore_Batch_Call struct { + *mock.Call +} + +// Batch is a helper method to define mock.On call +// - ctx context.Context +func (_e *RootStore_Expecter) Batch(ctx interface{}) *RootStore_Batch_Call { + return &RootStore_Batch_Call{Call: _e.mock.On("Batch", ctx)} +} + +func (_c *RootStore_Batch_Call) Run(run func(ctx context.Context)) *RootStore_Batch_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *RootStore_Batch_Call) Return(_a0 datastore.Batch, _a1 error) *RootStore_Batch_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_Batch_Call) RunAndReturn(run func(context.Context) (datastore.Batch, error)) *RootStore_Batch_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *RootStore) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type RootStore_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *RootStore_Expecter) Close() *RootStore_Close_Call { + return &RootStore_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *RootStore_Close_Call) Run(run func()) *RootStore_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *RootStore_Close_Call) Return(_a0 error) *RootStore_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Close_Call) RunAndReturn(run func() error) *RootStore_Close_Call { + _c.Call.Return(run) + return _c +} + +// Delete 
provides a mock function with given fields: ctx, key +func (_m *RootStore) Delete(ctx context.Context, key datastore.Key) error { + ret := _m.Called(ctx, key) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type RootStore_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Delete(ctx interface{}, key interface{}) *RootStore_Delete_Call { + return &RootStore_Delete_Call{Call: _e.mock.On("Delete", ctx, key)} +} + +func (_c *RootStore_Delete_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Delete_Call) Return(_a0 error) *RootStore_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Delete_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *RootStore_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, key +func (_m *RootStore) Get(ctx context.Context, key datastore.Key) ([]byte, error) { + ret := _m.Called(ctx, key) + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) []byte); ok { + r0 = rf(ctx, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type RootStore_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Get(ctx interface{}, key interface{}) *RootStore_Get_Call { + return &RootStore_Get_Call{Call: _e.mock.On("Get", ctx, key)} +} + +func (_c *RootStore_Get_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Get_Call) Return(value []byte, err error) *RootStore_Get_Call { + _c.Call.Return(value, err) + return _c +} + +func (_c *RootStore_Get_Call) RunAndReturn(run func(context.Context, datastore.Key) ([]byte, error)) *RootStore_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetSize provides a mock function with given fields: ctx, key +func (_m *RootStore) GetSize(ctx context.Context, key datastore.Key) (int, error) { + ret := _m.Called(ctx, key) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) int); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_GetSize_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'GetSize' +type RootStore_GetSize_Call struct { + *mock.Call +} + +// GetSize is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) GetSize(ctx interface{}, key interface{}) *RootStore_GetSize_Call { + return &RootStore_GetSize_Call{Call: _e.mock.On("GetSize", ctx, key)} +} + +func (_c *RootStore_GetSize_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_GetSize_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_GetSize_Call) Return(size int, err error) *RootStore_GetSize_Call { + _c.Call.Return(size, err) + return _c +} + +func (_c *RootStore_GetSize_Call) RunAndReturn(run func(context.Context, datastore.Key) (int, error)) *RootStore_GetSize_Call { + _c.Call.Return(run) + return _c +} + +// Has provides a mock function with given fields: ctx, key +func (_m *RootStore) Has(ctx context.Context, key datastore.Key) (bool, error) { + ret := _m.Called(ctx, key) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) bool); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, datastore.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Has_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Has' +type RootStore_Has_Call struct { + *mock.Call +} + +// Has is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +func (_e *RootStore_Expecter) Has(ctx interface{}, key interface{}) *RootStore_Has_Call { + return &RootStore_Has_Call{Call: _e.mock.On("Has", ctx, key)} +} + +func (_c *RootStore_Has_Call) Run(run func(ctx context.Context, key datastore.Key)) *RootStore_Has_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Has_Call) Return(exists bool, err error) *RootStore_Has_Call { + _c.Call.Return(exists, err) + return _c +} + +func (_c *RootStore_Has_Call) RunAndReturn(run func(context.Context, datastore.Key) (bool, error)) *RootStore_Has_Call { + _c.Call.Return(run) + return _c +} + +// NewTransaction provides a mock function with given fields: ctx, readOnly +func (_m *RootStore) NewTransaction(ctx context.Context, readOnly bool) (datastore.Txn, error) { + ret := _m.Called(ctx, readOnly) + + var r0 datastore.Txn + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok { + return rf(ctx, readOnly) + } + if rf, ok := ret.Get(0).(func(context.Context, bool) datastore.Txn); ok { + r0 = rf(ctx, readOnly) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.Txn) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(ctx, readOnly) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_NewTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewTransaction' +type RootStore_NewTransaction_Call struct { + *mock.Call +} + +// NewTransaction is a helper method to define mock.On call +// - ctx context.Context +// - readOnly bool +func (_e *RootStore_Expecter) NewTransaction(ctx 
interface{}, readOnly interface{}) *RootStore_NewTransaction_Call { + return &RootStore_NewTransaction_Call{Call: _e.mock.On("NewTransaction", ctx, readOnly)} +} + +func (_c *RootStore_NewTransaction_Call) Run(run func(ctx context.Context, readOnly bool)) *RootStore_NewTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(bool)) + }) + return _c +} + +func (_c *RootStore_NewTransaction_Call) Return(_a0 datastore.Txn, _a1 error) *RootStore_NewTransaction_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_NewTransaction_Call) RunAndReturn(run func(context.Context, bool) (datastore.Txn, error)) *RootStore_NewTransaction_Call { + _c.Call.Return(run) + return _c +} + +// Put provides a mock function with given fields: ctx, key, value +func (_m *RootStore) Put(ctx context.Context, key datastore.Key, value []byte) error { + ret := _m.Called(ctx, key, value) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok { + r0 = rf(ctx, key, value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put' +type RootStore_Put_Call struct { + *mock.Call +} + +// Put is a helper method to define mock.On call +// - ctx context.Context +// - key datastore.Key +// - value []byte +func (_e *RootStore_Expecter) Put(ctx interface{}, key interface{}, value interface{}) *RootStore_Put_Call { + return &RootStore_Put_Call{Call: _e.mock.On("Put", ctx, key, value)} +} + +func (_c *RootStore_Put_Call) Run(run func(ctx context.Context, key datastore.Key, value []byte)) *RootStore_Put_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key), args[2].([]byte)) + }) + return _c +} + +func (_c *RootStore_Put_Call) Return(_a0 error) *RootStore_Put_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Put_Call) RunAndReturn(run func(context.Context, datastore.Key, []byte) error) *RootStore_Put_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: ctx, q +func (_m *RootStore) Query(ctx context.Context, q query.Query) (query.Results, error) { + ret := _m.Called(ctx, q) + + var r0 query.Results + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, query.Query) query.Results); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(query.Results) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, query.Query) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootStore_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type RootStore_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +// - ctx context.Context +// - q query.Query +func (_e *RootStore_Expecter) Query(ctx interface{}, q interface{}) *RootStore_Query_Call { + return &RootStore_Query_Call{Call: _e.mock.On("Query", ctx, q)} +} + +func (_c *RootStore_Query_Call) Run(run func(ctx context.Context, q query.Query)) *RootStore_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(query.Query)) + }) + return _c +} + +func (_c *RootStore_Query_Call) Return(_a0 query.Results, _a1 error) *RootStore_Query_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *RootStore_Query_Call) RunAndReturn(run func(context.Context, query.Query) (query.Results, error)) *RootStore_Query_Call { + _c.Call.Return(run) + return _c +} + +// Sync provides a mock function with given fields: ctx, prefix +func (_m *RootStore) Sync(ctx context.Context, prefix datastore.Key) error { + ret := _m.Called(ctx, prefix) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok { + r0 = rf(ctx, prefix) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RootStore_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' +type RootStore_Sync_Call struct { + *mock.Call +} + +// Sync is a helper method to define mock.On call +// - ctx context.Context +// - prefix datastore.Key +func (_e *RootStore_Expecter) Sync(ctx interface{}, prefix interface{}) *RootStore_Sync_Call { + return &RootStore_Sync_Call{Call: _e.mock.On("Sync", ctx, prefix)} +} + +func (_c *RootStore_Sync_Call) Run(run func(ctx context.Context, prefix datastore.Key)) *RootStore_Sync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Key)) + }) + return _c +} + +func (_c *RootStore_Sync_Call) Return(_a0 error) *RootStore_Sync_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *RootStore_Sync_Call) RunAndReturn(run func(context.Context, datastore.Key) error) *RootStore_Sync_Call { + _c.Call.Return(run) + return _c +} + +// NewRootStore creates a new instance of RootStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRootStore(t interface { + mock.TestingT + Cleanup(func()) +}) *RootStore { + mock := &RootStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/Txn.go b/datastore/mocks/Txn.go new file mode 100644 index 0000000000..2fe024a9ad --- /dev/null +++ b/datastore/mocks/Txn.go @@ -0,0 +1,393 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + datastore "github.com/sourcenetwork/defradb/datastore" + mock "github.com/stretchr/testify/mock" +) + +// Txn is an autogenerated mock type for the Txn type +type Txn struct { + mock.Mock +} + +type Txn_Expecter struct { + mock *mock.Mock +} + +func (_m *Txn) EXPECT() *Txn_Expecter { + return &Txn_Expecter{mock: &_m.Mock} +} + +// Commit provides a mock function with given fields: ctx +func (_m *Txn) Commit(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Txn_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' +type Txn_Commit_Call struct { + *mock.Call +} + +// Commit is a helper method to define mock.On call +// - ctx context.Context +func (_e *Txn_Expecter) Commit(ctx interface{}) *Txn_Commit_Call { + return &Txn_Commit_Call{Call: _e.mock.On("Commit", ctx)} +} + +func (_c *Txn_Commit_Call) Run(run func(ctx context.Context)) *Txn_Commit_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Txn_Commit_Call) Return(_a0 error) *Txn_Commit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Commit_Call) RunAndReturn(run func(context.Context) error) *Txn_Commit_Call { + _c.Call.Return(run) + return _c +} + +// DAGstore provides a mock function with given fields: +func (_m *Txn) DAGstore() datastore.DAGStore { + ret := _m.Called() + + var r0 datastore.DAGStore + if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DAGStore) + } + } + + return r0 +} + +// Txn_DAGstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DAGstore' +type Txn_DAGstore_Call struct { + *mock.Call +} + +// DAGstore is a helper method to define mock.On call +func (_e *Txn_Expecter) DAGstore() *Txn_DAGstore_Call { + return &Txn_DAGstore_Call{Call: _e.mock.On("DAGstore")} +} + +func (_c *Txn_DAGstore_Call) Run(run func()) *Txn_DAGstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_DAGstore_Call) Return(_a0 datastore.DAGStore) *Txn_DAGstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_DAGstore_Call) RunAndReturn(run func() datastore.DAGStore) *Txn_DAGstore_Call { + _c.Call.Return(run) + return _c +} + +// Datastore provides a mock function with given fields: +func (_m *Txn) Datastore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Datastore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Datastore' +type Txn_Datastore_Call struct { + *mock.Call +} + +// Datastore is a helper method to define mock.On call +func (_e *Txn_Expecter) Datastore() *Txn_Datastore_Call { + return &Txn_Datastore_Call{Call: _e.mock.On("Datastore")} +} + +func (_c *Txn_Datastore_Call) Run(run func()) *Txn_Datastore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Datastore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Datastore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Datastore_Call) RunAndReturn(run 
func() datastore.DSReaderWriter) *Txn_Datastore_Call { + _c.Call.Return(run) + return _c +} + +// Discard provides a mock function with given fields: ctx +func (_m *Txn) Discard(ctx context.Context) { + _m.Called(ctx) +} + +// Txn_Discard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Discard' +type Txn_Discard_Call struct { + *mock.Call +} + +// Discard is a helper method to define mock.On call +// - ctx context.Context +func (_e *Txn_Expecter) Discard(ctx interface{}) *Txn_Discard_Call { + return &Txn_Discard_Call{Call: _e.mock.On("Discard", ctx)} +} + +func (_c *Txn_Discard_Call) Run(run func(ctx context.Context)) *Txn_Discard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Txn_Discard_Call) Return() *Txn_Discard_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_Discard_Call) RunAndReturn(run func(context.Context)) *Txn_Discard_Call { + _c.Call.Return(run) + return _c +} + +// Headstore provides a mock function with given fields: +func (_m *Txn) Headstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Headstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Headstore' +type Txn_Headstore_Call struct { + *mock.Call +} + +// Headstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Headstore() *Txn_Headstore_Call { + return &Txn_Headstore_Call{Call: _e.mock.On("Headstore")} +} + +func (_c *Txn_Headstore_Call) Run(run func()) *Txn_Headstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Headstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Headstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Headstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Headstore_Call { + _c.Call.Return(run) + return _c +} + +// OnError provides a mock function with given fields: fn +func (_m *Txn) OnError(fn func()) { + _m.Called(fn) +} + +// Txn_OnError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnError' +type Txn_OnError_Call struct { + *mock.Call +} + +// OnError is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnError(fn interface{}) *Txn_OnError_Call { + return &Txn_OnError_Call{Call: _e.mock.On("OnError", fn)} +} + +func (_c *Txn_OnError_Call) Run(run func(fn func())) *Txn_OnError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnError_Call) Return() *Txn_OnError_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnError_Call) RunAndReturn(run func(func())) *Txn_OnError_Call { + _c.Call.Return(run) + return _c +} + +// OnSuccess provides a mock function with given fields: fn +func (_m *Txn) OnSuccess(fn func()) { + _m.Called(fn) +} + +// Txn_OnSuccess_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnSuccess' +type Txn_OnSuccess_Call struct { + *mock.Call +} + +// OnSuccess is a helper method to define mock.On call +// - fn func() +func (_e *Txn_Expecter) OnSuccess(fn interface{}) *Txn_OnSuccess_Call { + return &Txn_OnSuccess_Call{Call: _e.mock.On("OnSuccess", fn)} +} + +func (_c *Txn_OnSuccess_Call) Run(run func(fn 
func())) *Txn_OnSuccess_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func())) + }) + return _c +} + +func (_c *Txn_OnSuccess_Call) Return() *Txn_OnSuccess_Call { + _c.Call.Return() + return _c +} + +func (_c *Txn_OnSuccess_Call) RunAndReturn(run func(func())) *Txn_OnSuccess_Call { + _c.Call.Return(run) + return _c +} + +// Rootstore provides a mock function with given fields: +func (_m *Txn) Rootstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Rootstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rootstore' +type Txn_Rootstore_Call struct { + *mock.Call +} + +// Rootstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Rootstore() *Txn_Rootstore_Call { + return &Txn_Rootstore_Call{Call: _e.mock.On("Rootstore")} +} + +func (_c *Txn_Rootstore_Call) Run(run func()) *Txn_Rootstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Rootstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Rootstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Rootstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Rootstore_Call { + _c.Call.Return(run) + return _c +} + +// Systemstore provides a mock function with given fields: +func (_m *Txn) Systemstore() datastore.DSReaderWriter { + ret := _m.Called() + + var r0 datastore.DSReaderWriter + if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSReaderWriter) + } + } + + return r0 +} + +// Txn_Systemstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Systemstore' +type Txn_Systemstore_Call struct { + *mock.Call +} + +// Systemstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Systemstore() *Txn_Systemstore_Call { + return &Txn_Systemstore_Call{Call: _e.mock.On("Systemstore")} +} + +func (_c *Txn_Systemstore_Call) Run(run func()) *Txn_Systemstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Systemstore_Call) Return(_a0 datastore.DSReaderWriter) *Txn_Systemstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Systemstore_Call) RunAndReturn(run func() datastore.DSReaderWriter) *Txn_Systemstore_Call { + _c.Call.Return(run) + return _c +} + +// NewTxn creates a new instance of Txn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxn(t interface { + mock.TestingT + Cleanup(func()) +}) *Txn { + mock := &Txn{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/datastore/mocks/utils.go b/datastore/mocks/utils.go new file mode 100644 index 0000000000..af91fc6d3a --- /dev/null +++ b/datastore/mocks/utils.go @@ -0,0 +1,116 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package mocks + +import ( + "testing" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/stretchr/testify/mock" +) + +type MultiStoreTxn struct { + *Txn + t *testing.T + MockRootstore *DSReaderWriter + MockDatastore *DSReaderWriter + MockHeadstore *DSReaderWriter + MockDAGstore *DAGStore + MockSystemstore *DSReaderWriter +} + +func prepareDataStore(t *testing.T) *DSReaderWriter { + dataStore := NewDSReaderWriter(t) + dataStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, ds.ErrNotFound).Maybe() + dataStore.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + dataStore.EXPECT().Has(mock.Anything, mock.Anything).Return(true, nil).Maybe() + return dataStore +} + +func prepareRootStore(t *testing.T) *DSReaderWriter { + return NewDSReaderWriter(t) +} + +func prepareHeadStore(t *testing.T) *DSReaderWriter { + headStore := NewDSReaderWriter(t) + + headStore.EXPECT().Query(mock.Anything, mock.Anything). + Return(NewQueryResultsWithValues(t), nil).Maybe() + + headStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, ds.ErrNotFound).Maybe() + headStore.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + headStore.EXPECT().Has(mock.Anything, mock.Anything).Return(false, nil).Maybe() + return headStore +} + +func prepareSystemStore(t *testing.T) *DSReaderWriter { + systemStore := NewDSReaderWriter(t) + systemStore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, nil).Maybe() + return systemStore +} + +func prepareDAGStore(t *testing.T) *DAGStore { + dagStore := NewDAGStore(t) + dagStore.EXPECT().Put(mock.Anything, mock.Anything).Return(nil).Maybe() + dagStore.EXPECT().Has(mock.Anything, mock.Anything).Return(false, nil).Maybe() + return dagStore +} + +func NewTxnWithMultistore(t *testing.T) *MultiStoreTxn { + txn := NewTxn(t) + txn.EXPECT().OnSuccess(mock.Anything).Maybe() + + result := &MultiStoreTxn{ + Txn: txn, + t: t, + MockRootstore: prepareRootStore(t), + MockDatastore: prepareDataStore(t), + MockHeadstore: prepareHeadStore(t), + MockDAGstore: prepareDAGStore(t), + MockSystemstore: prepareSystemStore(t), + } + + txn.EXPECT().Rootstore().Return(result.MockRootstore).Maybe() + txn.EXPECT().Datastore().Return(result.MockDatastore).Maybe() + txn.EXPECT().Headstore().Return(result.MockHeadstore).Maybe() + txn.EXPECT().DAGstore().Return(result.MockDAGstore).Maybe() + txn.EXPECT().Systemstore().Return(result.MockSystemstore).Maybe() + + return result +} + +func (txn *MultiStoreTxn) ClearSystemStore() *MultiStoreTxn { + txn.MockSystemstore = NewDSReaderWriter(txn.t) + txn.EXPECT().Systemstore().Unset() + txn.EXPECT().Systemstore().Return(txn.MockSystemstore).Maybe() + return txn +} + +func NewQueryResultsWithValues(t *testing.T, values ...[]byte) *Results { + results := make([]query.Result, len(values)) + for i, value := range values { + results[i] = query.Result{Entry: query.Entry{Value: value}} + } + return NewQueryResultsWithResults(t, results...) 
+} + +func NewQueryResultsWithResults(t *testing.T, results ...query.Result) *Results { + queryResults := NewResults(t) + resultChan := make(chan query.Result, len(results)) + for _, result := range results { + resultChan <- result + } + close(resultChan) + queryResults.EXPECT().Next().Return(resultChan).Maybe() + queryResults.EXPECT().Close().Return(nil).Maybe() + return queryResults +} diff --git a/db/backup.go b/db/backup.go new file mode 100644 index 0000000000..89925a6c53 --- /dev/null +++ b/db/backup.go @@ -0,0 +1,392 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "os" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/datastore" +) + +func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath string) (err error) { + f, err := os.Open(filepath) + if err != nil { + return NewErrOpenFile(err, filepath) + } + defer func() { + closeErr := f.Close() + if closeErr != nil { + err = NewErrCloseFile(closeErr, err) + } + }() + + d := json.NewDecoder(bufio.NewReader(f)) + + t, err := d.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return ErrExpectedJSONObject + } + for d.More() { + t, err := d.Token() + if err != nil { + return err + } + colName := t.(string) + col, err := db.getCollectionByName(ctx, txn, colName) + if err != nil { + return NewErrFailedToGetCollection(colName, err) + } + + t, err = d.Token() + if err != nil { + return err + } + if t != json.Delim('[') { + return ErrExpectedJSONArray + } + + for d.More() { + docMap := map[string]any{} + err = d.Decode(&docMap) + if err != nil { + return NewErrJSONDecode(err) + } + + // check if self referencing and remove from docMap for key creation + resetMap := map[string]any{} + for _, field := range col.Schema().Fields { + if field.Kind == client.FieldKind_FOREIGN_OBJECT { + if val, ok := docMap[field.Name+request.RelatedObjectID]; ok { + if docMap["_newKey"] == val { + resetMap[field.Name+request.RelatedObjectID] = val + delete(docMap, field.Name+request.RelatedObjectID) + } + } + } + } + + delete(docMap, "_key") + delete(docMap, "_newKey") + + doc, err := client.NewDocFromMap(docMap) + if err != nil { + return NewErrDocFromMap(err) + } + + err = col.WithTxn(txn).Create(ctx, doc) + if err != nil { + return NewErrDocCreate(err) + } + + // add back the self referencing fields and update doc. 
+ for k, v := range resetMap { + err := doc.Set(k, v) + if err != nil { + return NewErrDocUpdate(err) + } + err = col.WithTxn(txn).Update(ctx, doc) + if err != nil { + return NewErrDocUpdate(err) + } + } + } + _, err = d.Token() + if err != nil { + return err + } + } + + return nil +} + +func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client.BackupConfig) (err error) { + // old key -> new key + keyChangeCache := map[string]string{} + + cols := []client.Collection{} + if len(config.Collections) == 0 { + cols, err = db.getAllCollections(ctx, txn) + if err != nil { + return NewErrFailedToGetAllCollections(err) + } + } else { + for _, colName := range config.Collections { + col, err := db.getCollectionByName(ctx, txn, colName) + if err != nil { + return NewErrFailedToGetCollection(colName, err) + } + cols = append(cols, col) + } + } + colNameCache := map[string]struct{}{} + for _, col := range cols { + colNameCache[col.Name()] = struct{}{} + } + + tempFile := config.Filepath + ".temp" + f, err := os.Create(tempFile) + if err != nil { + return NewErrCreateFile(err, tempFile) + } + defer func() { + closeErr := f.Close() + if closeErr != nil { + err = NewErrCloseFile(closeErr, err) + } else if err != nil { + // ensure we clean up if there was an error + removeErr := os.Remove(tempFile) + if removeErr != nil { + err = NewErrRemoveFile(removeErr, err, tempFile) + } + } else { + _ = os.Rename(tempFile, config.Filepath) + } + }() + + // open the object + err = writeString(f, "{", "{\n", config.Pretty) + if err != nil { + return err + } + + firstCol := true + for _, col := range cols { + if firstCol { + firstCol = false + } else { + // add collection separator + err = writeString(f, ",", ",\n", config.Pretty) + if err != nil { + return err + } + } + + // set collection + err = writeString( + f, + fmt.Sprintf("\"%s\":[", col.Name()), + fmt.Sprintf(" \"%s\": [\n", col.Name()), + config.Pretty, + ) + if err != nil { + return err + } + colTxn := col.WithTxn(txn) + keysCh, err := colTxn.GetAllDocKeys(ctx) + if err != nil { + return err + } + + firstDoc := true + for key := range keysCh { + if firstDoc { + firstDoc = false + } else { + // add document separator + err = writeString(f, ",", ",\n", config.Pretty) + if err != nil { + return err + } + } + doc, err := colTxn.Get(ctx, key.Key, false) + if err != nil { + return err + } + + isSelfReference := false + refFieldName := "" + // replace any foreign key if it needs to be changed + for _, field := range col.Schema().Fields { + switch field.Kind { + case client.FieldKind_FOREIGN_OBJECT: + if _, ok := colNameCache[field.Schema]; !ok { + continue + } + if foreignKey, err := doc.Get(field.Name + request.RelatedObjectID); err == nil { + if newKey, ok := keyChangeCache[foreignKey.(string)]; ok { + err := doc.Set(field.Name+request.RelatedObjectID, newKey) + if err != nil { + return err + } + if foreignKey.(string) == doc.Key().String() { + isSelfReference = true + refFieldName = field.Name + request.RelatedObjectID + } + } else { + foreignCol, err := db.getCollectionByName(ctx, txn, field.Schema) + if err != nil { + return NewErrFailedToGetCollection(field.Schema, err) + } + foreignDocKey, err := client.NewDocKeyFromString(foreignKey.(string)) + if err != nil { + return err + } + foreignDoc, err := foreignCol.Get(ctx, foreignDocKey, false) + if err != nil { + err := doc.Set(field.Name+request.RelatedObjectID, nil) + if err != nil { + return err + } + } else { + oldForeignDoc, err := foreignDoc.ToMap() + if err != nil { + return err + } + 
+ // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. + ensureIntIsInt(foreignCol.Schema().Fields, oldForeignDoc) + + delete(oldForeignDoc, "_key") + if foreignDoc.Key().String() == foreignDocKey.String() { + delete(oldForeignDoc, field.Name+request.RelatedObjectID) + } + + if foreignDoc.Key().String() == doc.Key().String() { + isSelfReference = true + refFieldName = field.Name + request.RelatedObjectID + } + + newForeignDoc, err := client.NewDocFromMap(oldForeignDoc) + if err != nil { + return err + } + + if foreignDoc.Key().String() != doc.Key().String() { + err = doc.Set(field.Name+request.RelatedObjectID, newForeignDoc.Key().String()) + if err != nil { + return err + } + } + + if newForeignDoc.Key().String() != foreignDoc.Key().String() { + keyChangeCache[foreignDoc.Key().String()] = newForeignDoc.Key().String() + } + } + } + } + } + } + + docM, err := doc.ToMap() + if err != nil { + return err + } + + // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. + ensureIntIsInt(col.Schema().Fields, docM) + + delete(docM, "_key") + if isSelfReference { + delete(docM, refFieldName) + } + + newDoc, err := client.NewDocFromMap(docM) + if err != nil { + return err + } + // newKey is needed to let the user know what will be the key of the imported document. + docM["_newKey"] = newDoc.Key().String() + // NewDocFromMap removes the "_key" map item so we add it back. + docM["_key"] = doc.Key().String() + + if isSelfReference { + docM[refFieldName] = newDoc.Key().String() + } + + if newDoc.Key().String() != doc.Key().String() { + keyChangeCache[doc.Key().String()] = newDoc.Key().String() + } + + var b []byte + if config.Pretty { + _, err = f.WriteString(" ") + if err != nil { + return NewErrFailedToWriteString(err) + } + b, err = json.MarshalIndent(docM, " ", " ") + if err != nil { + return NewErrFailedToWriteString(err) + } + } else { + b, err = json.Marshal(docM) + if err != nil { + return err + } + } + + // write document + _, err = f.Write(b) + if err != nil { + return err + } + } + + // close collection + err = writeString(f, "]", "\n ]", config.Pretty) + if err != nil { + return err + } + } + + // close object + err = writeString(f, "}", "\n}", config.Pretty) + if err != nil { + return err + } + + err = f.Sync() + if err != nil { + return err + } + + return nil +} + +func writeString(f *os.File, normal, pretty string, isPretty bool) error { + if isPretty { + _, err := f.WriteString(pretty) + if err != nil { + return NewErrFailedToWriteString(err) + } + return nil + } + + _, err := f.WriteString(normal) + if err != nil { + return NewErrFailedToWriteString(err) + } + return nil +} + +// Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. +func ensureIntIsInt(fields []client.FieldDescription, docMap map[string]any) { + for _, field := range fields { + if field.Kind == client.FieldKind_INT { + if val, ok := docMap[field.Name]; ok { + switch v := val.(type) { + case uint64: + docMap[field.Name] = int(v) + case int64: + docMap[field.Name] = int(v) + } + } + } + } +} diff --git a/db/backup_test.go b/db/backup_test.go new file mode 100644 index 0000000000..2f89f54a07 --- /dev/null +++ b/db/backup_test.go @@ -0,0 +1,552 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" +) + +func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, 
&client.BackupConfig{Filepath: filepath, Pretty: true}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithSingleCollection_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + book: [Book] + } + + type Book { + name: String + author: User + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 31}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`)) + require.NoError(t, err) + + doc4, err := 
client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Book") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + err = col2.Create(ctx, doc4) + require.NoError(t, err) + + err = doc1.Set("age", 31) + require.NoError(t, err) + + err = col1.Update(ctx, doc1) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`)) + require.NoError(t, err) + + col1, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = col1.Create(ctx, doc1) + require.NoError(t, err) + + err = col1.Create(ctx, doc2) + require.NoError(t, err) + + doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) + require.NoError(t, err) + + col2, err := db.GetCollectionByName(ctx, "Address") + require.NoError(t, err) + + err = col2.Create(ctx, doc3) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + defer txn.Discard(ctx) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}}) + require.NoError(t, err) + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + fileMap := map[string]any{} + err = 
json.Unmarshal(b, &fileMap) + require.NoError(t, err) + + expectedMap := map[string]any{} + data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}]}`) + err = json.Unmarshal(data, &expectedMap) + require.NoError(t, err) + require.EqualValues(t, expectedMap, fileMap) +} + +func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.NoError(t, err) + err = txn.Commit(ctx) + require.NoError(t, err) + + txn, err = db.NewTxn(ctx, true) + require.NoError(t, err) + + col1, err := db.getCollectionByName(ctx, txn, "Address") + require.NoError(t, err) + + key1, err := client.NewDocKeyFromString("bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f") + require.NoError(t, err) + _, err = col1.Get(ctx, key1, false) + require.NoError(t, err) + + col2, err := db.getCollectionByName(ctx, txn, "User") + require.NoError(t, err) + + key2, err := client.NewDocKeyFromString("bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df") + require.NoError(t, err) + _, err = col2.Get(ctx, key2, false) + require.NoError(t, err) + + key3, err := client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") + require.NoError(t, err) + _, err = col2.Get(ctx, key3, false) + require.NoError(t, err) +} + +func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`["Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","_newKey":"bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df","age":40,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]]`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrExpectedJSONObject) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, 
`type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrExpectedJSONArray) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Address":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + wrongFilepath := t.TempDir() + "/some/test.json" + err = db.basicImport(ctx, txn, wrongFilepath) + require.ErrorIs(t, err, os.ErrNotExist) + err = txn.Commit(ctx) + require.NoError(t, err) +} + +func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + defer db.Close(ctx) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + } + + type Address { + street: String + city: String + }`) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + filepath := t.TempDir() + "/test.json" + + err = os.WriteFile( + filepath, + []byte(`{"Addresses":{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}}`), + 0664, + ) + require.NoError(t, err) + + err = db.basicImport(ctx, txn, filepath) + require.ErrorIs(t, err, ErrFailedToGetCollection) + err = txn.Commit(ctx) + require.NoError(t, err) +} diff --git a/db/collection.go b/db/collection.go index a9131037c9..3430684697 100644 --- a/db/collection.go +++ b/db/collection.go @@ -22,16 +22,18 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/merkle/crdt" ) @@ -56,6 +58,9 @@ type collection struct { schemaID string desc client.CollectionDescription + + indexes []CollectionIndex + fetcherFactory func() fetcher.Fetcher } // @todo: Move the base Descriptions to an internal API within the db/ package. 
@@ -94,12 +99,31 @@ func (db *db) newCollection(desc client.CollectionDescription) (*collection, err } return &collection{ - db: db, - desc: desc, + db: db, + desc: client.CollectionDescription{ + ID: desc.ID, + Name: desc.Name, + Schema: desc.Schema, + }, colID: desc.ID, }, nil } +// newFetcher returns a new fetcher instance for this collection. +// If a fetcherFactory is set, it will be used to create the fetcher. +// It's a very simple factory, but it allows us to inject a mock fetcher +// for testing. +func (c *collection) newFetcher() fetcher.Fetcher { + var innerFetcher fetcher.Fetcher + if c.fetcherFactory != nil { + innerFetcher = c.fetcherFactory() + } else { + innerFetcher = new(fetcher.DocumentFetcher) + } + + return lens.NewFetcher(innerFetcher, c.db.LensRegistry()) +} + // createCollection creates a collection and saves it to the database in its system store. // Note: Collection.ID is an autoincrementing value that is generated by the database. func (db *db) createCollection( @@ -142,7 +166,7 @@ func (db *db) createCollection( } // add a reference to this DB by desc hash - cid, err := core.NewSHA256CidV1(globalSchemaBuf) + cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) if err != nil { return nil, err } @@ -186,6 +210,12 @@ func (db *db) createCollection( logging.NewKV("Name", col.Name()), logging.NewKV("SchemaID", col.SchemaID()), ) + + for _, index := range desc.Indexes { + if _, err := col.createIndex(ctx, txn, index); err != nil { + return nil, err + } + } return col, nil } @@ -232,10 +262,11 @@ func (db *db) updateCollection( return nil, err } - cid, err := core.NewSHA256CidV1(globalSchemaBuf) + cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) if err != nil { return nil, err } + previousSchemaVersionID := desc.Schema.VersionID schemaVersionID := cid.String() desc.Schema.VersionID = schemaVersionID @@ -264,6 +295,12 @@ func (db *db) updateCollection( return nil, err } + schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Schema.SchemaID, previousSchemaVersionID) + err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) + if err != nil { + return nil, err + } + return db.getCollectionByName(ctx, txn, desc.Name) } @@ -276,7 +313,6 @@ func (db *db) validateUpdateCollection( txn datastore.Txn, proposedDesc client.CollectionDescription, ) (bool, error) { - var hasChanged bool existingCollection, err := db.getCollectionByName(ctx, txn, proposedDesc.Name) if err != nil { if errors.Is(err, ds.ErrNotFound) { @@ -310,6 +346,20 @@ func (db *db) validateUpdateCollection( return false, ErrCannotSetVersionID } + hasChangedFields, err := validateUpdateCollectionFields(existingDesc, proposedDesc) + if err != nil { + return hasChangedFields, err + } + + hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes) + return hasChangedFields || hasChangedIndexes, err +} + +func validateUpdateCollectionFields( + existingDesc client.CollectionDescription, + proposedDesc client.CollectionDescription, +) (bool, error) { + hasChanged := false existingFieldsByID := map[client.FieldID]client.FieldDescription{} existingFieldIndexesByName := map[string]int{} for i, field := range existingDesc.Schema.Fields { @@ -365,10 +415,40 @@ func (db *db) validateUpdateCollection( return false, NewErrCannotDeleteField(field.Name, field.ID) } } - return hasChanged, nil } +func validateUpdateCollectionIndexes( + existingIndexes []client.IndexDescription, + proposedIndexes []client.IndexDescription, +) (bool, error) { + 
existingNameToIndex := map[string]client.IndexDescription{} + for _, index := range existingIndexes { + existingNameToIndex[index.Name] = index + } + for _, proposedIndex := range proposedIndexes { + if existingIndex, exists := existingNameToIndex[proposedIndex.Name]; exists { + if len(existingIndex.Fields) != len(proposedIndex.Fields) { + return false, ErrCanNotChangeIndexWithPatch + } + for i := range existingIndex.Fields { + if existingIndex.Fields[i] != proposedIndex.Fields[i] { + return false, ErrCanNotChangeIndexWithPatch + } + } + delete(existingNameToIndex, proposedIndex.Name) + } else { + return false, NewErrCannotAddIndexWithPatch(proposedIndex.Name) + } + } + if len(existingNameToIndex) > 0 { + for _, index := range existingNameToIndex { + return false, NewErrCannotDropIndexWithPatch(index.Name) + } + } + return false, nil +} + // getCollectionByVersionId returns the [*collection] at the given [schemaVersionId] version. // // Will return an error if the given key is empty, or not found. @@ -378,7 +458,7 @@ func (db *db) getCollectionByVersionID( schemaVersionId string, ) (*collection, error) { if schemaVersionId == "" { - return nil, ErrSchemaVersionIdEmpty + return nil, ErrSchemaVersionIDEmpty } key := core.NewCollectionSchemaVersionKey(schemaVersionId) @@ -393,12 +473,19 @@ func (db *db) getCollectionByVersionID( return nil, err } - return &collection{ + col := &collection{ db: db, desc: desc, colID: desc.ID, schemaID: desc.Schema.SchemaID, - }, nil + } + + err = col.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + + return col, nil } // getCollectionByName returns an existing collection within the database. @@ -424,7 +511,7 @@ func (db *db) getCollectionBySchemaID( schemaID string, ) (client.Collection, error) { if schemaID == "" { - return nil, ErrSchemaIdEmpty + return nil, ErrSchemaIDEmpty } key := core.NewCollectionSchemaKey(schemaID) @@ -569,11 +656,13 @@ func (c *collection) SchemaID() string { // handle instead of a raw DB handle. 
func (c *collection) WithTxn(txn datastore.Txn) client.Collection { return &collection{ - db: c.db, - txn: immutable.Some(txn), - desc: c.desc, - colID: c.colID, - schemaID: c.schemaID, + db: c.db, + txn: immutable.Some(txn), + desc: c.desc, + colID: c.colID, + schemaID: c.schemaID, + indexes: c.indexes, + fetcherFactory: c.fetcherFactory, } } @@ -614,34 +703,25 @@ func (c *collection) CreateMany(ctx context.Context, docs []*client.Document) er func (c *collection) getKeysFromDoc( doc *client.Document, ) (client.DocKey, core.PrimaryDataStoreKey, error) { - // DocKey verification - buf, err := doc.Bytes() - if err != nil { - return client.DocKey{}, core.PrimaryDataStoreKey{}, err - } - // @todo: grab the cid Prefix from the DocKey internal CID if available - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - // And then feed it some data - doccid, err := pref.Sum(buf) + docKey, err := doc.GenerateDocKey() if err != nil { return client.DocKey{}, core.PrimaryDataStoreKey{}, err } - dockey := client.NewDocKeyV0(doccid) - primaryKey := c.getPrimaryKeyFromDocKey(dockey) + primaryKey := c.getPrimaryKeyFromDocKey(docKey) if primaryKey.DocKey != doc.Key().String() { return client.DocKey{}, core.PrimaryDataStoreKey{}, NewErrDocVerification(doc.Key().String(), primaryKey.DocKey) } - return dockey, primaryKey, nil + return docKey, primaryKey, nil } func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.Document) error { + // This has to be done before dockey verification happens in the next step. + if err := doc.RemapAliasFieldsAndDockey(c.desc.Schema.Fields); err != nil { + return err + } + dockey, primaryKey, err := c.getKeysFromDoc(doc) if err != nil { return err @@ -653,10 +733,10 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client. return err } if exists { - return ErrDocumentAlreadyExists + return NewErrDocumentAlreadyExists(primaryKey.DocKey) } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(primaryKey.DocKey) } // write value object marker if we have an empty doc @@ -674,7 +754,7 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client. return err } - return err + return c.indexNewDoc(ctx, txn, doc) } // Update an existing document with the new values. @@ -696,7 +776,7 @@ func (c *collection) Update(ctx context.Context, doc *client.Document) error { return client.ErrDocumentNotFound } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(primaryKey.DocKey) } err = c.update(ctx, txn, doc) @@ -755,6 +835,12 @@ func (c *collection) save( doc *client.Document, isCreate bool, ) (cid.Cid, error) { + if !isCreate { + err := c.updateIndexedDoc(ctx, txn, doc) + if err != nil { + return cid.Undef, err + } + } // NOTE: We delay the final Clean() call until we know // the commit on the transaction is successful. 
If we didn't // wait, and just did it here, then *if* the commit fails down @@ -780,11 +866,12 @@ func (c *collection) save( if val.IsDirty() { fieldKey, fieldExists := c.tryGetFieldKey(primaryKey, k) + if !fieldExists { return cid.Undef, client.NewErrFieldNotExist(k) } - fieldDescription, valid := c.desc.GetField(k) + fieldDescription, valid := c.desc.Schema.GetField(k) if !valid { return cid.Undef, client.NewErrFieldNotExist(k) } @@ -886,7 +973,7 @@ func (c *collection) Delete(ctx context.Context, key client.DocKey) (bool, error return false, client.ErrDocumentNotFound } if isDeleted { - return false, ErrDocumentDeleted + return false, NewErrDocumentDeleted(primaryKey.DocKey) } err = c.applyDelete(ctx, txn, primaryKey) @@ -967,7 +1054,11 @@ func (c *collection) saveValueToMerkleCRDT( args ...any) (ipld.Node, uint64, error) { switch ctype { case client.LWW_REGISTER: - field, _ := c.Description().GetFieldByID(key.FieldId) + fieldID, err := strconv.Atoi(key.FieldId) + if err != nil { + return nil, 0, err + } + field, _ := c.Description().GetFieldByID(client.FieldID(fieldID)) merkleCRDT, err := c.db.crdtFactory.InstanceWithStores( txn, core.NewCollectionSchemaVersionKey(c.Schema().VersionID), @@ -1110,5 +1201,6 @@ func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) { return uint32(field.ID), true } } + return uint32(0), false } diff --git a/db/collection_delete.go b/db/collection_delete.go index acbdb26404..480656849f 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -239,7 +239,7 @@ func (c *collection) applyDelete( return client.ErrDocumentNotFound } if isDeleted { - return ErrDocumentDeleted + return NewErrDocumentDeleted(key.DocKey) } dsKey := key.ToDataStoreKey() diff --git a/db/collection_get.go b/db/collection_get.go index 678a154598..17e113231e 100644 --- a/db/collection_get.go +++ b/db/collection_get.go @@ -17,7 +17,6 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" - "github.com/sourcenetwork/defradb/db/fetcher" ) func (c *collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { @@ -37,7 +36,7 @@ func (c *collection) Get(ctx context.Context, key client.DocKey, showDeleted boo return nil, client.ErrDocumentNotFound } - doc, err := c.get(ctx, txn, dsKey, showDeleted) + doc, err := c.get(ctx, txn, dsKey, nil, showDeleted) if err != nil { return nil, err } @@ -48,13 +47,14 @@ func (c *collection) get( ctx context.Context, txn datastore.Txn, key core.PrimaryDataStoreKey, + fields []client.FieldDescription, showDeleted bool, ) (*client.Document, error) { // create a new document fetcher - df := new(fetcher.DocumentFetcher) + df := c.newFetcher() desc := &c.desc // initialize it with the primary index - err := df.Init(&c.desc, nil, false, showDeleted) + err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, showDeleted) if err != nil { _ = df.Close() return nil, err @@ -63,14 +63,14 @@ func (c *collection) get( // construct target key for DocKey targetKey := base.MakeDocKey(*desc, key.DocKey) // run the doc fetcher - err = df.Start(ctx, txn, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) + err = df.Start(ctx, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) if err != nil { _ = df.Close() return nil, err } // return first matched decoded doc - doc, err := df.FetchNextDecoded(ctx) + doc, _, err := df.FetchNextDecoded(ctx) if err != nil { _ = df.Close() return nil, err diff --git 
a/db/collection_index.go b/db/collection_index.go new file mode 100644 index 0000000000..a3a45ee7d4 --- /dev/null +++ b/db/collection_index.go @@ -0,0 +1,542 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/request/graphql/schema" +) + +// createCollectionIndex creates a new collection index and saves it to the database in its system store. +func (db *db) createCollectionIndex( + ctx context.Context, + txn datastore.Txn, + collectionName string, + desc client.IndexDescription, +) (client.IndexDescription, error) { + col, err := db.getCollectionByName(ctx, txn, collectionName) + if err != nil { + return client.IndexDescription{}, NewErrCanNotReadCollection(collectionName, err) + } + col = col.WithTxn(txn) + return col.CreateIndex(ctx, desc) +} + +func (db *db) dropCollectionIndex( + ctx context.Context, + txn datastore.Txn, + collectionName, indexName string, +) error { + col, err := db.getCollectionByName(ctx, txn, collectionName) + if err != nil { + return NewErrCanNotReadCollection(collectionName, err) + } + col = col.WithTxn(txn) + return col.DropIndex(ctx, indexName) +} + +// getAllIndexes returns all the indexes in the database. 
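+// The result maps each collection name to its stored index descriptions. A brief
+// sketch of the shape (collection and index names illustrative only):
+//
+//	indexes, err := db.getAllIndexes(ctx, txn)
+//	// indexes["User"] -> []client.IndexDescription{{Name: "User_name_ASC", ID: 1, ...}}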
+func (db *db) getAllIndexes( + ctx context.Context, + txn datastore.Txn, +) (map[client.CollectionName][]client.IndexDescription, error) { + prefix := core.NewCollectionIndexKey("", "") + + deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + prefix.ToString(), txn.Systemstore()) + + if err != nil { + return nil, err + } + + indexes := make(map[client.CollectionName][]client.IndexDescription) + + for _, indexRec := range deserializedIndexes { + indexKey, err := core.NewCollectionIndexKeyFromString(indexRec.key) + if err != nil { + return nil, NewErrInvalidStoredIndexKey(indexKey.ToString()) + } + indexes[indexKey.CollectionName] = append(indexes[indexKey.CollectionName], indexRec.element) + } + + return indexes, nil +} + +func (db *db) fetchCollectionIndexDescriptions( + ctx context.Context, + txn datastore.Txn, + colName string, +) ([]client.IndexDescription, error) { + prefix := core.NewCollectionIndexKey(colName, "") + deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + prefix.ToString(), txn.Systemstore()) + if err != nil { + return nil, err + } + indexes := make([]client.IndexDescription, 0, len(deserializedIndexes)) + for _, indexRec := range deserializedIndexes { + indexes = append(indexes, indexRec.element) + } + return indexes, nil +} + +func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error { + err := c.loadIndexes(ctx, txn) + if err != nil { + return err + } + for _, index := range c.indexes { + err = index.Save(ctx, txn, doc) + if err != nil { + return err + } + } + return nil +} + +// collectIndexedFields returns all fields that are indexed by all collection indexes. +func (c *collection) collectIndexedFields() []client.FieldDescription { + fieldsMap := make(map[string]client.FieldDescription) + for _, index := range c.indexes { + for _, field := range index.Description().Fields { + for i := range c.desc.Schema.Fields { + colField := c.desc.Schema.Fields[i] + if field.Name == colField.Name { + fieldsMap[field.Name] = colField + break + } + } + } + } + fields := make([]client.FieldDescription, 0, len(fieldsMap)) + for _, field := range fieldsMap { + fields = append(fields, field) + } + return fields +} + +func (c *collection) updateIndexedDoc( + ctx context.Context, + txn datastore.Txn, + doc *client.Document, +) error { + err := c.loadIndexes(ctx, txn) + if err != nil { + return err + } + oldDoc, err := c.get(ctx, txn, c.getPrimaryKeyFromDocKey(doc.Key()), c.collectIndexedFields(), false) + if err != nil { + return err + } + for _, index := range c.indexes { + err = index.Update(ctx, txn, oldDoc, doc) + if err != nil { + return err + } + } + return nil +} + +// CreateIndex creates a new index on the collection. +// +// If the index name is empty, a name will be automatically generated. +// Otherwise its uniqueness will be checked against existing indexes and +// it will be validated with `schema.IsValidIndexName` method. +// +// The provided index description must include at least one field with +// a name that exists in the collection schema. +// Also it's `ID` field must be zero. It will be assigned a unique +// incremental value by the database. +// +// The index description will be stored in the system store. +// +// Once finished, if there are existing documents in the collection, +// the documents will be indexed by the new index. 
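+//
+// A minimal caller-side sketch of the behaviour described above (collection and
+// field names are illustrative, not part of this patch):
+//
+//	col, _ := db.GetCollectionByName(ctx, "User")
+//	desc, err := col.CreateIndex(ctx, client.IndexDescription{
+//		Fields: []client.IndexedFieldDescription{{Name: "name"}},
+//	})
+//	// On success desc.Name is generated (e.g. "User_name_ASC") and desc.ID is
+//	// assigned by the database; existing "User" documents are indexed as well.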
+func (c *collection) CreateIndex( + ctx context.Context, + desc client.IndexDescription, +) (client.IndexDescription, error) { + txn, err := c.getTxn(ctx, false) + if err != nil { + return client.IndexDescription{}, err + } + defer c.discardImplicitTxn(ctx, txn) + + index, err := c.createIndex(ctx, txn, desc) + if err != nil { + return client.IndexDescription{}, err + } + return index.Description(), c.commitImplicitTxn(ctx, txn) +} + +func (c *collection) createIndex( + ctx context.Context, + txn datastore.Txn, + desc client.IndexDescription, +) (CollectionIndex, error) { + if desc.Name != "" && !schema.IsValidIndexName(desc.Name) { + return nil, schema.NewErrIndexWithInvalidName("!") + } + err := validateIndexDescription(desc) + if err != nil { + return nil, err + } + + err = c.checkExistingFields(ctx, desc.Fields) + if err != nil { + return nil, err + } + + indexKey, err := c.generateIndexNameIfNeededAndCreateKey(ctx, txn, &desc) + if err != nil { + return nil, err + } + + colSeq, err := c.db.getSequence(ctx, txn, fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, c.ID())) + if err != nil { + return nil, err + } + colID, err := colSeq.next(ctx, txn) + if err != nil { + return nil, err + } + desc.ID = uint32(colID) + + buf, err := json.Marshal(desc) + if err != nil { + return nil, err + } + + err = txn.Systemstore().Put(ctx, indexKey.ToDS(), buf) + if err != nil { + return nil, err + } + colIndex, err := NewCollectionIndex(c, desc) + if err != nil { + return nil, err + } + c.desc.Indexes = append(c.desc.Indexes, colIndex.Description()) + c.indexes = append(c.indexes, colIndex) + err = c.indexExistingDocs(ctx, txn, colIndex) + if err != nil { + return nil, err + } + return colIndex, nil +} + +func (c *collection) iterateAllDocs( + ctx context.Context, + txn datastore.Txn, + fields []client.FieldDescription, + exec func(doc *client.Document) error, +) error { + df := c.newFetcher() + err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, false) + if err != nil { + _ = df.Close() + return err + } + start := base.MakeCollectionKey(c.desc) + spans := core.NewSpans(core.NewSpan(start, start.PrefixEnd())) + + err = df.Start(ctx, spans) + if err != nil { + _ = df.Close() + return err + } + + var doc *client.Document + for { + doc, _, err = df.FetchNextDecoded(ctx) + if err != nil { + _ = df.Close() + return err + } + if doc == nil { + break + } + err = exec(doc) + if err != nil { + return err + } + } + + return df.Close() +} + +func (c *collection) indexExistingDocs( + ctx context.Context, + txn datastore.Txn, + index CollectionIndex, +) error { + fields := make([]client.FieldDescription, 0, 1) + for _, field := range index.Description().Fields { + for i := range c.desc.Schema.Fields { + colField := c.desc.Schema.Fields[i] + if field.Name == colField.Name { + fields = append(fields, colField) + break + } + } + } + + return c.iterateAllDocs(ctx, txn, fields, func(doc *client.Document) error { + return index.Save(ctx, txn, doc) + }) +} + +// DropIndex removes an index from the collection. +// +// The index will be removed from the system store. +// +// All index artifacts for existing documents related the index will be removed. 
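+//
+// A short caller-side sketch (the index name is illustrative and follows the
+// generated naming scheme used elsewhere in this file):
+//
+//	err := col.DropIndex(ctx, "User_name_ASC")
+//	// Dropping a name that does not exist returns an error rather than a no-op.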
+func (c *collection) DropIndex(ctx context.Context, indexName string) error { + txn, err := c.getTxn(ctx, false) + if err != nil { + return err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.dropIndex(ctx, txn, indexName) + if err != nil { + return err + } + return c.commitImplicitTxn(ctx, txn) +} + +func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName string) error { + err := c.loadIndexes(ctx, txn) + if err != nil { + return err + } + + var didFind bool + for i := range c.indexes { + if c.indexes[i].Name() == indexName { + err = c.indexes[i].RemoveAll(ctx, txn) + if err != nil { + return err + } + c.indexes = append(c.indexes[:i], c.indexes[i+1:]...) + didFind = true + break + } + } + if !didFind { + return NewErrIndexWithNameDoesNotExists(indexName) + } + + for i := range c.desc.Indexes { + if c.desc.Indexes[i].Name == indexName { + c.desc.Indexes = append(c.desc.Indexes[:i], c.desc.Indexes[i+1:]...) + break + } + } + key := core.NewCollectionIndexKey(c.Name(), indexName) + err = txn.Systemstore().Delete(ctx, key.ToDS()) + if err != nil { + return err + } + + return nil +} + +func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error { + prefix := core.NewCollectionIndexKey(c.Name(), "") + + keys, err := fetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) + if err != nil { + return err + } + + for _, key := range keys { + err = txn.Systemstore().Delete(ctx, key) + if err != nil { + return err + } + } + + return err +} + +func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error { + indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, txn, c.Name()) + if err != nil { + return err + } + colIndexes := make([]CollectionIndex, 0, len(indexDescriptions)) + for _, indexDesc := range indexDescriptions { + index, err := NewCollectionIndex(c, indexDesc) + if err != nil { + return err + } + colIndexes = append(colIndexes, index) + } + c.desc.Indexes = indexDescriptions + c.indexes = colIndexes + return nil +} + +// GetIndexes returns all indexes for the collection. 
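+//
+// A short usage sketch (output illustrative):
+//
+//	descs, err := col.GetIndexes(ctx)
+//	// descs might contain, e.g., {Name: "User_name_ASC", Fields: [{Name: "name"}]}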
+func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + txn, err := c.getTxn(ctx, false) + if err != nil { + return nil, err + } + defer c.discardImplicitTxn(ctx, txn) + + err = c.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + return c.desc.Indexes, nil +} + +func (c *collection) checkExistingFields( + ctx context.Context, + fields []client.IndexedFieldDescription, +) error { + collectionFields := c.Description().Schema.Fields + for _, field := range fields { + found := false + for _, colField := range collectionFields { + if field.Name == colField.Name { + found = true + break + } + } + if !found { + return NewErrNonExistingFieldForIndex(field.Name) + } + } + return nil +} + +func (c *collection) generateIndexNameIfNeededAndCreateKey( + ctx context.Context, + txn datastore.Txn, + desc *client.IndexDescription, +) (core.CollectionIndexKey, error) { + var indexKey core.CollectionIndexKey + if desc.Name == "" { + nameIncrement := 1 + for { + desc.Name = generateIndexName(c, desc.Fields, nameIncrement) + indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) + if err != nil { + return core.CollectionIndexKey{}, err + } + if !exists { + break + } + nameIncrement++ + } + } else { + indexKey = core.NewCollectionIndexKey(c.Name(), desc.Name) + exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) + if err != nil { + return core.CollectionIndexKey{}, err + } + if exists { + return core.CollectionIndexKey{}, NewErrIndexWithNameAlreadyExists(desc.Name) + } + } + return indexKey, nil +} + +func validateIndexDescription(desc client.IndexDescription) error { + if desc.ID != 0 { + return NewErrNonZeroIndexIDProvided(desc.ID) + } + if len(desc.Fields) == 0 { + return ErrIndexMissingFields + } + if len(desc.Fields) == 1 && desc.Fields[0].Direction == client.Descending { + return ErrIndexSingleFieldWrongDirection + } + for i := range desc.Fields { + if desc.Fields[i].Name == "" { + return ErrIndexFieldMissingName + } + if desc.Fields[i].Direction == "" { + desc.Fields[i].Direction = client.Ascending + } + } + return nil +} + +func generateIndexName(col client.Collection, fields []client.IndexedFieldDescription, inc int) string { + sb := strings.Builder{} + // at the moment we support only single field indexes that can be stored only in + // ascending order. This will change once we introduce composite indexes. 
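+	// For example, a collection "User" indexed on its "name" field yields
+	// "User_name_ASC", and a second generated name for the same field becomes
+	// "User_name_ASC_2" (example names only).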
+ direction := "ASC" + sb.WriteString(col.Name()) + sb.WriteByte('_') + // we can safely assume that there is at least one field in the slice + // because we validate it before calling this function + sb.WriteString(fields[0].Name) + sb.WriteByte('_') + sb.WriteString(direction) + if inc > 1 { + sb.WriteByte('_') + sb.WriteString(strconv.Itoa(inc)) + } + return sb.String() +} + +type deserializedElement[T any] struct { + key string + element T +} + +func deserializePrefix[T any]( + ctx context.Context, + prefix string, + storage ds.Read, +) ([]deserializedElement[T], error) { + q, err := storage.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + elements := make([]deserializedElement[T], 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, res.Error + } + + var element T + err = json.Unmarshal(res.Value, &element) + if err != nil { + _ = q.Close() + return nil, NewErrInvalidStoredIndex(err) + } + elements = append(elements, deserializedElement[T]{key: res.Key, element: element}) + } + if err := q.Close(); err != nil { + return nil, err + } + return elements, nil +} diff --git a/db/collection_update.go b/db/collection_update.go index ea2d0b3980..b945ec4592 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -305,9 +305,19 @@ func (c *collection) applyMerge( return ErrInvalidMergeValueType } - fd, valid := c.desc.GetField(mfield) - if !valid { - return client.NewErrFieldNotExist(mfield) + fd, isValidAliasField := c.desc.Schema.GetField(mfield + request.RelatedObjectID) + if isValidAliasField { + // Overwrite the key with aliased name to the internal related object name. + oldKey := mfield + mfield = mfield + request.RelatedObjectID + mergeMap[mfield] = mval + delete(mergeMap, oldKey) + } else { + var isValidField bool + fd, isValidField = c.desc.Schema.GetField(mfield) + if !isValidField { + return client.NewErrFieldNotExist(mfield) + } } relationFieldDescription, isSecondaryRelationID := c.isSecondaryIDField(fd) @@ -398,7 +408,9 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie return client.FieldDescription{}, false } - relationFieldDescription, valid := c.Description().GetField(strings.TrimSuffix(fieldDesc.Name, "_id")) + relationFieldDescription, valid := c.Description().Schema.GetField( + strings.TrimSuffix(fieldDesc.Name, request.RelatedObjectID), + ) return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation() } @@ -431,7 +443,7 @@ func (c *collection) patchPrimaryDoc( _, err = primaryCol.UpdateWithKey( ctx, primaryDockey, - fmt.Sprintf(`{"%s": "%s"}`, primaryField.Name+"_id", docKey), + fmt.Sprintf(`{"%s": "%s"}`, primaryField.Name+request.RelatedObjectID, docKey), ) if err != nil { return err @@ -491,7 +503,7 @@ func validateFieldSchema(val *fastjson.Value, field client.FieldDescription) (an return getNillableArray(val, getInt64) case client.FieldKind_FOREIGN_OBJECT, client.FieldKind_FOREIGN_OBJECT_ARRAY: - return nil, ErrMergeSubTypeNotSupported + return nil, NewErrFieldOrAliasToFieldNotExist(field.Name) } return nil, client.NewErrUnhandledType("FieldKind", field.Kind) diff --git a/db/db.go b/db/db.go index 656dbdadf7..8ffda296b4 100644 --- a/db/db.go +++ b/db/db.go @@ -28,6 +28,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/logging" 
"github.com/sourcenetwork/defradb/merkle/crdt" "github.com/sourcenetwork/defradb/request/graphql" @@ -58,11 +59,15 @@ type db struct { events events.Events - parser core.Parser + parser core.Parser + lensRegistry client.LensRegistry // The maximum number of retries per transaction. maxTxnRetries immutable.Option[int] + // The maximum number of cached migrations instances to preserve per schema version. + lensPoolSize immutable.Option[int] + // The options used to init the database options any } @@ -88,6 +93,15 @@ func WithMaxRetries(num int) Option { } } +// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version. +// +// Will default to `5` if not set. +func WithLensPoolSize(num int) Option { + return func(db *db) { + db.lensPoolSize = immutable.Some(num) + } +} + // NewDB creates a new instance of the DB using the given options. func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (client.DB, error) { return newDB(ctx, rootstore, options...) @@ -122,6 +136,10 @@ func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option opt(db) } + // lensPoolSize may be set by `options`, and because they are funcs on db + // we have to mutate `db` here to set the registry. + db.lensRegistry = lens.NewRegistry(db.lensPoolSize) + err = db.initialize(ctx) if err != nil { return nil, err @@ -162,6 +180,10 @@ func (db *db) systemstore() datastore.DSReaderWriter { return db.multistore.Systemstore() } +func (db *db) LensRegistry() client.LensRegistry { + return db.lensRegistry +} + // Initialize is called when a database is first run and creates all the db global meta data // like Collection ID counters. func (db *db) initialize(ctx context.Context) error { @@ -180,13 +202,19 @@ func (db *db) initialize(ctx context.Context) error { return err } // if we're loading an existing database, just load the schema - // and finish initialization + // and migrations and finish initialization if exists { log.Debug(ctx, "DB has already been initialized, continuing") err = db.loadSchema(ctx, txn) if err != nil { return err } + + err = db.lensRegistry.ReloadLenses(ctx, txn) + if err != nil { + return err + } + // The query language types are only updated on successful commit // so we must not forget to do so on success regardless of whether // we have written to the datastores. diff --git a/db/errors.go b/db/errors.go index 7aa1cc5656..e5b55dcf1a 100644 --- a/db/errors.go +++ b/db/errors.go @@ -16,31 +16,69 @@ import ( ) const ( - errFailedToGetHeads string = "failed to get document heads" - errFailedToCreateCollectionQuery string = "failed to create collection prefix query" - errFailedToGetCollection string = "failed to get collection" - errDocVerification string = "the document verification failed" - errAddingP2PCollection string = "cannot add collection ID" - errRemovingP2PCollection string = "cannot remove collection ID" - errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported" - errCollectionIDDoesntMatch string = "CollectionID does not match existing" - errSchemaIDDoesntMatch string = "SchemaID does not match existing" - errCannotModifySchemaName string = "modifying the schema name is not supported" - errCannotSetVersionID string = "setting the VersionID is not supported. 
It is updated automatically" - errCannotSetFieldID string = "explicitly setting a field ID value is not supported" - errCannotAddRelationalField string = "the adding of new relation fields is not yet supported" - errDuplicateField string = "duplicate field" - errCannotMutateField string = "mutating an existing field is not supported" - errCannotMoveField string = "moving fields is not currently supported" - errInvalidCRDTType string = "only default or LWW (last writer wins) CRDT types are supported" - errCannotDeleteField string = "deleting an existing field is not supported" - errFieldKindNotFound string = "no type found for given name" + errFailedToGetHeads string = "failed to get document heads" + errFailedToCreateCollectionQuery string = "failed to create collection prefix query" + errFailedToGetCollection string = "failed to get collection" + errFailedToGetAllCollections string = "failed to get all collections" + errDocVerification string = "the document verification failed" + errAddingP2PCollection string = "cannot add collection ID" + errRemovingP2PCollection string = "cannot remove collection ID" + errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported" + errCollectionIDDoesntMatch string = "CollectionID does not match existing" + errSchemaIDDoesntMatch string = "SchemaID does not match existing" + errCannotModifySchemaName string = "modifying the schema name is not supported" + errCannotSetVersionID string = "setting the VersionID is not supported. It is updated automatically" + errCannotSetFieldID string = "explicitly setting a field ID value is not supported" + errCannotAddRelationalField string = "the adding of new relation fields is not yet supported" + errDuplicateField string = "duplicate field" + errCannotMutateField string = "mutating an existing field is not supported" + errCannotMoveField string = "moving fields is not currently supported" + errInvalidCRDTType string = "only default or LWW (last writer wins) CRDT types are supported" + errCannotDeleteField string = "deleting an existing field is not supported" + errFieldKindNotFound string = "no type found for given name" + errDocumentAlreadyExists string = "a document with the given dockey already exists" + errDocumentDeleted string = "a document with the given dockey has been deleted" + errIndexMissingFields string = "index missing fields" + errNonZeroIndexIDProvided string = "non-zero index ID provided" + errIndexFieldMissingName string = "index field missing name" + errIndexFieldMissingDirection string = "index field missing direction" + errIndexSingleFieldWrongDirection string = "wrong direction for index with a single field" + errIndexWithNameAlreadyExists string = "index with name already exists" + errInvalidStoredIndex string = "invalid stored index" + errInvalidStoredIndexKey string = "invalid stored index key" + errNonExistingFieldForIndex string = "creating an index on a non-existing property" + errCollectionDoesntExisting string = "collection with given name doesn't exist" + errFailedToStoreIndexedField string = "failed to store indexed field" + errFailedToReadStoredIndexDesc string = "failed to read stored index description" + errCanNotDeleteIndexedField string = "can not delete indexed field" + errCanNotAddIndexWithPatch string = "adding indexes via patch is not supported" + errCanNotDropIndexWithPatch string = "dropping indexes via patch is not supported" + errCanNotChangeIndexWithPatch string = "changing indexes via patch is not supported" + 
errIndexWithNameDoesNotExists string = "index with name doesn't exists" + errInvalidFieldValue string = "invalid field value" + errUnsupportedIndexFieldType string = "unsupported index field type" + errIndexDescriptionHasNoFields string = "index description has no fields" + errIndexDescHasNonExistingField string = "index description has non existing field" + errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist" + errCreateFile string = "failed to create file" + errOpenFile string = "failed to open file" + errCloseFile string = "failed to close file" + errRemoveFile string = "failed to remove file" + errFailedToReadByte string = "failed to read byte" + errFailedToWriteString string = "failed to write string" + errJSONDecode string = "failed to decode JSON" + errDocFromMap string = "failed to create a new doc from map" + errDocCreate string = "failed to save a new doc to collection" + errDocUpdate string = "failed to update doc to collection" + errExpectedJSONObject string = "expected JSON object" + errExpectedJSONArray string = "expected JSON array" ) var ( ErrFailedToGetHeads = errors.New(errFailedToGetHeads) ErrFailedToCreateCollectionQuery = errors.New(errFailedToCreateCollectionQuery) ErrFailedToGetCollection = errors.New(errFailedToGetCollection) + ErrFailedToGetAllCollections = errors.New(errFailedToGetAllCollections) // ErrDocVerification occurs when a documents contents fail the verification during a Create() // call against the supplied Document Key. ErrDocVerification = errors.New(errDocVerification) @@ -52,38 +90,60 @@ var ( ErrInvalidMergeValueType = errors.New( "the type of value in the merge patch doesn't match the schema", ) - ErrMissingDocFieldToUpdate = errors.New("missing document field to update") - ErrDocMissingKey = errors.New("document is missing key") - ErrMergeSubTypeNotSupported = errors.New("merge doesn't support sub types yet") - ErrInvalidFilter = errors.New("invalid filter") - ErrInvalidOpPath = errors.New("invalid patch op path") - ErrDocumentAlreadyExists = errors.New("a document with the given dockey already exists") - ErrDocumentDeleted = errors.New("a document with the given dockey has been deleted") - ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments") - ErrUnknownCRDT = errors.New("unknown crdt") - ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey") - ErrCollectionAlreadyExists = errors.New("collection already exists") - ErrCollectionNameEmpty = errors.New("collection name can't be empty") - ErrSchemaIdEmpty = errors.New("schema ID can't be empty") - ErrSchemaVersionIdEmpty = errors.New("schema version ID can't be empty") - ErrKeyEmpty = errors.New("key cannot be empty") - ErrAddingP2PCollection = errors.New(errAddingP2PCollection) - ErrRemovingP2PCollection = errors.New(errRemovingP2PCollection) - ErrAddCollectionWithPatch = errors.New(errAddCollectionWithPatch) - ErrCollectionIDDoesntMatch = errors.New(errCollectionIDDoesntMatch) - ErrSchemaIDDoesntMatch = errors.New(errSchemaIDDoesntMatch) - ErrCannotModifySchemaName = errors.New(errCannotModifySchemaName) - ErrCannotSetVersionID = errors.New(errCannotSetVersionID) - ErrCannotSetFieldID = errors.New(errCannotSetFieldID) - ErrCannotAddRelationalField = errors.New(errCannotAddRelationalField) - ErrDuplicateField = errors.New(errDuplicateField) - ErrCannotMutateField = errors.New(errCannotMutateField) - ErrCannotMoveField = errors.New(errCannotMoveField) - ErrInvalidCRDTType = errors.New(errInvalidCRDTType) - 
ErrCannotDeleteField = errors.New(errCannotDeleteField) - ErrFieldKindNotFound = errors.New(errFieldKindNotFound) + ErrMissingDocFieldToUpdate = errors.New("missing document field to update") + ErrDocMissingKey = errors.New("document is missing key") + ErrInvalidFilter = errors.New("invalid filter") + ErrInvalidOpPath = errors.New("invalid patch op path") + ErrDocumentAlreadyExists = errors.New(errDocumentAlreadyExists) + ErrDocumentDeleted = errors.New(errDocumentDeleted) + ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments") + ErrUnknownCRDT = errors.New("unknown crdt") + ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey") + ErrCollectionAlreadyExists = errors.New("collection already exists") + ErrCollectionNameEmpty = errors.New("collection name can't be empty") + ErrSchemaIDEmpty = errors.New("schema ID can't be empty") + ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") + ErrKeyEmpty = errors.New("key cannot be empty") + ErrAddingP2PCollection = errors.New(errAddingP2PCollection) + ErrRemovingP2PCollection = errors.New(errRemovingP2PCollection) + ErrAddCollectionWithPatch = errors.New(errAddCollectionWithPatch) + ErrCollectionIDDoesntMatch = errors.New(errCollectionIDDoesntMatch) + ErrSchemaIDDoesntMatch = errors.New(errSchemaIDDoesntMatch) + ErrCannotModifySchemaName = errors.New(errCannotModifySchemaName) + ErrCannotSetVersionID = errors.New(errCannotSetVersionID) + ErrCannotSetFieldID = errors.New(errCannotSetFieldID) + ErrCannotAddRelationalField = errors.New(errCannotAddRelationalField) + ErrDuplicateField = errors.New(errDuplicateField) + ErrCannotMutateField = errors.New(errCannotMutateField) + ErrCannotMoveField = errors.New(errCannotMoveField) + ErrInvalidCRDTType = errors.New(errInvalidCRDTType) + ErrCannotDeleteField = errors.New(errCannotDeleteField) + ErrFieldKindNotFound = errors.New(errFieldKindNotFound) + ErrIndexMissingFields = errors.New(errIndexMissingFields) + ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName) + ErrIndexFieldMissingDirection = errors.New(errIndexFieldMissingDirection) + ErrIndexSingleFieldWrongDirection = errors.New(errIndexSingleFieldWrongDirection) + ErrCanNotChangeIndexWithPatch = errors.New(errCanNotChangeIndexWithPatch) + ErrFieldOrAliasToFieldNotExist = errors.New(errFieldOrAliasToFieldNotExist) + ErrCreateFile = errors.New(errCreateFile) + ErrOpenFile = errors.New(errOpenFile) + ErrCloseFile = errors.New(errCloseFile) + ErrRemoveFile = errors.New(errRemoveFile) + ErrFailedToReadByte = errors.New(errFailedToReadByte) + ErrFailedToWriteString = errors.New(errFailedToWriteString) + ErrJSONDecode = errors.New(errJSONDecode) + ErrDocFromMap = errors.New(errDocFromMap) + ErrDocCreate = errors.New(errDocCreate) + ErrDocUpdate = errors.New(errDocUpdate) + ErrExpectedJSONObject = errors.New(errExpectedJSONObject) + ErrExpectedJSONArray = errors.New(errExpectedJSONArray) ) +// NewErrFieldOrAliasToFieldNotExist returns an error indicating that the given field or an alias field does not exist. +func NewErrFieldOrAliasToFieldNotExist(name string) error { + return errors.New(errFieldOrAliasToFieldNotExist, errors.NewKV("Name", name)) +} + // NewErrFailedToGetHeads returns a new error indicating that the heads of a document // could not be obtained. 
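// As with the other constructors here, the wrapped error still matches its exported
// sentinel, so callers can branch on the failure kind; a minimal sketch (using the
// standard library errors.Is, as the backup tests do via require.ErrorIs):
//
//	if errors.Is(err, ErrFailedToGetHeads) {
//		// handle the missing-heads case
//	}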
func NewErrFailedToGetHeads(inner error) error { @@ -96,11 +156,64 @@ func NewErrFailedToCreateCollectionQuery(inner error) error { return errors.Wrap(errFailedToCreateCollectionQuery, inner) } -// NewErrFailedToGetCollection returns a new error indicating that the collection could not be obtained. +// NewErrInvalidStoredIndex returns a new error indicating that the stored +// index in the database is invalid. +func NewErrInvalidStoredIndex(inner error) error { + return errors.Wrap(errInvalidStoredIndex, inner) +} + +// NewErrInvalidStoredIndexKey returns a new error indicating that the stored +// index in the database is invalid. +func NewErrInvalidStoredIndexKey(key string) error { + return errors.New(errInvalidStoredIndexKey, errors.NewKV("Key", key)) +} + +// NewErrNonExistingFieldForIndex returns a new error indicating the attempt to create an index +// on a non-existing field. +func NewErrNonExistingFieldForIndex(field string) error { + return errors.New(errNonExistingFieldForIndex, errors.NewKV("Field", field)) +} + +// NewErrCanNotReadCollection returns a new error indicating the collection doesn't exist. +func NewErrCanNotReadCollection(colName string, inner error) error { + return errors.Wrap(errCollectionDoesntExisting, inner, errors.NewKV("Collection", colName)) +} + +// NewErrFailedToStoreIndexedField returns a new error indicating that the indexed field +// could not be stored. +func NewErrFailedToStoreIndexedField(key string, inner error) error { + return errors.Wrap(errFailedToStoreIndexedField, inner, errors.NewKV("Key", key)) +} + +// NewErrFailedToReadStoredIndexDesc returns a new error indicating that the stored index +// description could not be read. +func NewErrFailedToReadStoredIndexDesc(inner error) error { + return errors.Wrap(errFailedToReadStoredIndexDesc, inner) +} + +// NewCanNotDeleteIndexedField returns a new error a failed attempt to delete an indexed field +func NewCanNotDeleteIndexedField(inner error) error { + return errors.Wrap(errCanNotDeleteIndexedField, inner) +} + +// NewErrNonZeroIndexIDProvided returns a new error indicating that a non-zero index ID was +// provided. +func NewErrNonZeroIndexIDProvided(indexID uint32) error { + return errors.New(errNonZeroIndexIDProvided, errors.NewKV("ID", indexID)) +} + +// NewErrFailedToGetCollection returns a new error indicating that the collection could not +// be obtained. func NewErrFailedToGetCollection(name string, inner error) error { return errors.Wrap(errFailedToGetCollection, inner, errors.NewKV("Name", name)) } +// NewErrFailedToGetAllCollections returns a new error indicating that the collection list could not +// be obtained. +func NewErrFailedToGetAllCollections(inner error) error { + return errors.Wrap(errFailedToGetAllCollections, inner) +} + // NewErrDocVerification returns a new error indicating that the document verification failed. func NewErrDocVerification(expected string, actual string) error { return errors.New( @@ -214,3 +327,162 @@ func NewErrCannotDeleteField(name string, id client.FieldID) error { errors.NewKV("ID", id), ) } + +func NewErrDocumentAlreadyExists(dockey string) error { + return errors.New( + errDocumentAlreadyExists, + errors.NewKV("DocKey", dockey), + ) +} + +func NewErrDocumentDeleted(dockey string) error { + return errors.New( + errDocumentDeleted, + errors.NewKV("DocKey", dockey), + ) +} + +// NewErrIndexWithNameAlreadyExists returns a new error indicating that an index with the +// given name already exists. 
+func NewErrIndexWithNameAlreadyExists(indexName string) error { + return errors.New( + errIndexWithNameAlreadyExists, + errors.NewKV("Name", indexName), + ) +} + +// NewErrIndexWithNameDoesNotExists returns a new error indicating that an index with the +// given name does not exist. +func NewErrIndexWithNameDoesNotExists(indexName string) error { + return errors.New( + errIndexWithNameDoesNotExists, + errors.NewKV("Name", indexName), + ) +} + +// NewErrCannotAddIndexWithPatch returns a new error indicating that an index cannot be added +// with a patch. +func NewErrCannotAddIndexWithPatch(proposedName string) error { + return errors.New( + errCanNotAddIndexWithPatch, + errors.NewKV("ProposedName", proposedName), + ) +} + +// NewErrCannotDropIndexWithPatch returns a new error indicating that an index cannot be dropped +// with a patch. +func NewErrCannotDropIndexWithPatch(indexName string) error { + return errors.New( + errCanNotDropIndexWithPatch, + errors.NewKV("Name", indexName), + ) +} + +// NewErrInvalidFieldValue returns a new error indicating that the given value is invalid for the +// given field kind. +func NewErrInvalidFieldValue(kind client.FieldKind, value any) error { + return errors.New( + errInvalidFieldValue, + errors.NewKV("Kind", kind), + errors.NewKV("Value", value), + ) +} + +// NewErrUnsupportedIndexFieldType returns a new error indicating that the given field kind is not +// supported for indexing. +func NewErrUnsupportedIndexFieldType(kind client.FieldKind) error { + return errors.New( + errUnsupportedIndexFieldType, + errors.NewKV("Kind", kind), + ) +} + +// NewErrIndexDescHasNoFields returns a new error indicating that the given index +// description has no fields. +func NewErrIndexDescHasNoFields(desc client.IndexDescription) error { + return errors.New( + errIndexDescriptionHasNoFields, + errors.NewKV("Description", desc), + ) +} + +// NewErrIndexDescHasNonExistingField returns a new error indicating that the given index +// description points to a field that does not exist. +func NewErrIndexDescHasNonExistingField(desc client.IndexDescription, fieldName string) error { + return errors.New( + errIndexDescHasNonExistingField, + errors.NewKV("Description", desc), + errors.NewKV("Field name", fieldName), + ) +} + +// NewErrCreateFile returns a new error indicating there was a failure in creating a file. +func NewErrCreateFile(inner error, filepath string) error { + return errors.Wrap(errCreateFile, inner, errors.NewKV("Filepath", filepath)) +} + +// NewErrOpenFile returns a new error indicating there was a failure in opening a file. +func NewErrOpenFile(inner error, filepath string) error { + return errors.Wrap(errOpenFile, inner, errors.NewKV("Filepath", filepath)) +} + +// NewErrCloseFile returns a new error indicating there was a failure in closing a file. +func NewErrCloseFile(closeErr, other error) error { + if other != nil { + return errors.Wrap(errCloseFile, closeErr, errors.NewKV("Other error", other)) + } + return errors.Wrap(errCloseFile, closeErr) +} + +// NewErrRemoveFile returns a new error indicating there was a failure in removing a file. 
+func NewErrRemoveFile(removeErr, other error, filepath string) error { + if other != nil { + return errors.Wrap( + errRemoveFile, + removeErr, + errors.NewKV("Other error", other), + errors.NewKV("Filepath", filepath), + ) + } + return errors.Wrap( + errRemoveFile, + removeErr, + errors.NewKV("Filepath", filepath), + ) +} + +// NewErrFailedToReadByte returns a new error indicating there was a failure in read a byte +// from the Reader +func NewErrFailedToReadByte(inner error) error { + return errors.Wrap(errFailedToReadByte, inner) +} + +// NewErrFailedToWriteString returns a new error indicating there was a failure in writing +// a string to the Writer +func NewErrFailedToWriteString(inner error) error { + return errors.Wrap(errFailedToWriteString, inner) +} + +// NewErrJSONDecode returns a new error indicating there was a failure in decoding some JSON +// from the JSON decoder +func NewErrJSONDecode(inner error) error { + return errors.Wrap(errJSONDecode, inner) +} + +// NewErrDocFromMap returns a new error indicating there was a failure to create +// a new doc from a map +func NewErrDocFromMap(inner error) error { + return errors.Wrap(errDocFromMap, inner) +} + +// NewErrDocCreate returns a new error indicating there was a failure to save +// a new doc to a collection +func NewErrDocCreate(inner error) error { + return errors.Wrap(errDocCreate, inner) +} + +// NewErrDocUpdate returns a new error indicating there was a failure to update +// a doc to a collection +func NewErrDocUpdate(inner error) error { + return errors.Wrap(errDocUpdate, inner) +} diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go index a141c50652..ec3803a2fa 100644 --- a/db/fetcher/encoded_doc.go +++ b/db/fetcher/encoded_doc.go @@ -11,15 +11,26 @@ package fetcher import ( - "fmt" - + "github.com/bits-and-blooms/bitset" "github.com/fxamacker/cbor/v2" - "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" ) +type EncodedDocument interface { + // Key returns the key of the document + Key() []byte + SchemaVersionID() string + // Reset re-initializes the EncodedDocument object. + Reset() + // Decode returns a properly decoded document object + Decode() (*client.Document, error) + // DecodeToDoc returns a decoded document as a + // map of field/value pairs + DecodeToDoc() (core.Doc, error) +} + type EPTuple []encProperty // EncProperty is an encoded property of a EncodedDocument @@ -27,199 +38,122 @@ type encProperty struct { Desc client.FieldDescription Raw []byte + // Filter flag to determine if this flag + // is needed for eager filter evaluation + IsFilter bool + // // encoding meta data // encoding base.DataEncoding } // Decode returns the decoded value and CRDT type for the given property. 
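The new file-handling constructors pair the immediate failure with any error the caller was already returning. As a purely illustrative sketch (not part of this patch), a caller inside the same `db` package might use `NewErrCloseFile` like this; the helper name is made up:

```go
package db

import "os"

// closeWithError is a hypothetical caller of NewErrCloseFile: it preserves an
// already-failed operation error while still reporting a failure to close the
// file, so neither error is silently dropped.
func closeWithError(f *os.File, opErr error) error {
	if closeErr := f.Close(); closeErr != nil {
		return NewErrCloseFile(closeErr, opErr)
	}
	return opErr
}
```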
-func (e encProperty) Decode() (client.CType, any, error) { - ctype := client.CType(e.Raw[0]) - buf := e.Raw[1:] +func (e encProperty) Decode() (any, error) { var val any - err := cbor.Unmarshal(buf, &val) + err := cbor.Unmarshal(e.Raw, &val) if err != nil { - return ctype, nil, err - } - - if array, isArray := val.([]any); isArray { - var ok bool - switch e.Desc.Kind { - case client.FieldKind_BOOL_ARRAY: - boolArray := make([]bool, len(array)) - for i, untypedValue := range array { - boolArray[i], ok = untypedValue.(bool) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[bool](e.Desc.Name, untypedValue) - } - } - val = boolArray - - case client.FieldKind_NILLABLE_BOOL_ARRAY: - val, err = convertNillableArray[bool](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_INT_ARRAY: - intArray := make([]int64, len(array)) - for i, untypedValue := range array { - intArray[i], err = convertToInt(fmt.Sprintf("%s[%v]", e.Desc.Name, i), untypedValue) - if err != nil { - return ctype, nil, err - } - } - val = intArray - - case client.FieldKind_NILLABLE_INT_ARRAY: - val, err = convertNillableArrayWithConverter(e.Desc.Name, array, convertToInt) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_FLOAT_ARRAY: - floatArray := make([]float64, len(array)) - for i, untypedValue := range array { - floatArray[i], ok = untypedValue.(float64) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[float64](e.Desc.Name, untypedValue) - } - } - val = floatArray - - case client.FieldKind_NILLABLE_FLOAT_ARRAY: - val, err = convertNillableArray[float64](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - - case client.FieldKind_STRING_ARRAY: - stringArray := make([]string, len(array)) - for i, untypedValue := range array { - stringArray[i], ok = untypedValue.(string) - if !ok { - return ctype, nil, client.NewErrUnexpectedType[string](e.Desc.Name, untypedValue) - } - } - val = stringArray - - case client.FieldKind_NILLABLE_STRING_ARRAY: - val, err = convertNillableArray[string](e.Desc.Name, array) - if err != nil { - return ctype, nil, err - } - } - } else { // CBOR often encodes values typed as floats as ints - switch e.Desc.Kind { - case client.FieldKind_FLOAT: - switch v := val.(type) { - case int64: - return ctype, float64(v), nil - case int: - return ctype, float64(v), nil - case uint64: - return ctype, float64(v), nil - case uint: - return ctype, float64(v), nil - } - } + return nil, err } - return ctype, val, nil + return core.DecodeFieldValue(e.Desc, val) } -func convertNillableArray[T any](propertyName string, items []any) ([]immutable.Option[T], error) { - resultArray := make([]immutable.Option[T], len(items)) - for i, untypedValue := range items { - if untypedValue == nil { - resultArray[i] = immutable.None[T]() - continue - } - value, ok := untypedValue.(T) - if !ok { - return nil, client.NewErrUnexpectedType[T](fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) - } - resultArray[i] = immutable.Some(value) - } - return resultArray, nil +// @todo: Implement Encoded Document type +type encodedDocument struct { + mapping *core.DocumentMapping + doc *core.Doc + + key []byte + schemaVersionID string + Properties map[client.FieldDescription]*encProperty + + // tracking bitsets + // A value of 1 indicates a required field + // 0 means we we ignore the field + // we update the bitsets as we collect values + // by clearing the bit for the FieldID + filterSet *bitset.BitSet // filter fields + selectSet *bitset.BitSet // 
select fields } -func convertNillableArrayWithConverter[TOut any]( - propertyName string, - items []any, - converter func(propertyName string, in any) (TOut, error), -) ([]immutable.Option[TOut], error) { - resultArray := make([]immutable.Option[TOut], len(items)) - for i, untypedValue := range items { - if untypedValue == nil { - resultArray[i] = immutable.None[TOut]() - continue - } - value, err := converter(fmt.Sprintf("%s[%v]", propertyName, i), untypedValue) - if err != nil { - return nil, err - } - resultArray[i] = immutable.Some(value) - } - return resultArray, nil -} +var _ EncodedDocument = (*encodedDocument)(nil) -func convertToInt(propertyName string, untypedValue any) (int64, error) { - switch value := untypedValue.(type) { - case uint64: - return int64(value), nil - case int64: - return value, nil - case float64: - return int64(value), nil - default: - return 0, client.NewErrUnexpectedType[string](propertyName, untypedValue) - } +func (encdoc *encodedDocument) Key() []byte { + return encdoc.key } -// @todo: Implement Encoded Document type -type encodedDocument struct { - Key []byte - Properties map[client.FieldDescription]*encProperty +func (encdoc *encodedDocument) SchemaVersionID() string { + return encdoc.schemaVersionID } // Reset re-initializes the EncodedDocument object. func (encdoc *encodedDocument) Reset() { - encdoc.Properties = make(map[client.FieldDescription]*encProperty) - encdoc.Key = nil + encdoc.Properties = make(map[client.FieldDescription]*encProperty, 0) + encdoc.key = nil + if encdoc.mapping != nil { + doc := encdoc.mapping.NewDoc() + encdoc.doc = &doc + } + encdoc.filterSet = nil + encdoc.selectSet = nil + encdoc.schemaVersionID = "" } // Decode returns a properly decoded document object func (encdoc *encodedDocument) Decode() (*client.Document, error) { - key, err := client.NewDocKeyFromString(string(encdoc.Key)) + key, err := client.NewDocKeyFromString(string(encdoc.key)) if err != nil { return nil, err } doc := client.NewDocWithKey(key) - for fieldDesc, prop := range encdoc.Properties { - ctype, val, err := prop.Decode() + for _, prop := range encdoc.Properties { + val, err := prop.Decode() if err != nil { return nil, err } - err = doc.SetAs(fieldDesc.Name, val, ctype) + err = doc.SetAs(prop.Desc.Name, val, prop.Desc.Typ) if err != nil { return nil, err } } + doc.SchemaVersionID = encdoc.SchemaVersionID() + return doc, nil } // DecodeToDoc returns a decoded document as a // map of field/value pairs -func (encdoc *encodedDocument) DecodeToDoc(mapping *core.DocumentMapping) (core.Doc, error) { - doc := mapping.NewDoc() - doc.SetKey(string(encdoc.Key)) - for fieldDesc, prop := range encdoc.Properties { - _, val, err := prop.Decode() +func (encdoc *encodedDocument) DecodeToDoc() (core.Doc, error) { + return encdoc.decodeToDoc(false) +} + +func (encdoc *encodedDocument) decodeToDocForFilter() (core.Doc, error) { + return encdoc.decodeToDoc(true) +} + +func (encdoc *encodedDocument) decodeToDoc(filter bool) (core.Doc, error) { + if encdoc.mapping == nil { + return core.Doc{}, ErrMissingMapper + } + if encdoc.doc == nil { + doc := encdoc.mapping.NewDoc() + encdoc.doc = &doc + } + encdoc.doc.SetKey(string(encdoc.key)) + for _, prop := range encdoc.Properties { + if encdoc.doc.Fields[prop.Desc.ID] != nil { // used cached decoded fields + continue + } + if filter && !prop.IsFilter { // only get filter fields if filter=true + continue + } + val, err := prop.Decode() if err != nil { return core.Doc{}, err } - doc.Fields[fieldDesc.ID] = val + 
encdoc.doc.Fields[prop.Desc.ID] = val } - return doc, nil + + encdoc.doc.SchemaVersionID = encdoc.SchemaVersionID() + return *encdoc.doc, nil } diff --git a/db/fetcher/errors.go b/db/fetcher/errors.go index 31453e8ad6..84d947c46f 100644 --- a/db/fetcher/errors.go +++ b/db/fetcher/errors.go @@ -25,6 +25,7 @@ const ( errVFetcherFailedToDecodeNode string = "(version fetcher) failed to decode protobuf" errVFetcherFailedToGetDagLink string = "(version fetcher) failed to get node link from DAG" errFailedToGetDagNode string = "failed to get DAG Node" + errMissingMapper string = "missing document mapper" ) var ( @@ -38,6 +39,7 @@ var ( ErrVFetcherFailedToDecodeNode = errors.New(errVFetcherFailedToDecodeNode) ErrVFetcherFailedToGetDagLink = errors.New(errVFetcherFailedToGetDagLink) ErrFailedToGetDagNode = errors.New(errFailedToGetDagNode) + ErrMissingMapper = errors.New(errMissingMapper) ErrSingleSpanOnly = errors.New("spans must contain only a single entry") ) diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index 5de2a8899e..35a89c29c0 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -13,7 +13,9 @@ package fetcher import ( "bytes" "context" + "strings" + "github.com/bits-and-blooms/bitset" dsq "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/defradb/client" @@ -21,19 +23,56 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/iterable" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/request/graphql/parser" ) +// ExecInfo contains statistics about the fetcher execution. +type ExecInfo struct { + // Number of documents fetched. + DocsFetched uint64 + // Number of fields fetched. + FieldsFetched uint64 +} + +// Add adds the other ExecInfo to the current ExecInfo. +func (s *ExecInfo) Add(other ExecInfo) { + s.DocsFetched += other.DocsFetched + s.FieldsFetched += other.FieldsFetched +} + +// Reset resets the ExecInfo. +func (s *ExecInfo) Reset() { + s.DocsFetched = 0 + s.FieldsFetched = 0 +} + // Fetcher is the interface for collecting documents from the underlying data store. // It handles all the key/value scanning, aggregation, and document encoding. type Fetcher interface { - Init(col *client.CollectionDescription, fields []*client.FieldDescription, reverse bool, showDeleted bool) error - Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error - FetchNext(ctx context.Context) (*encodedDocument, error) - FetchNextDecoded(ctx context.Context) (*client.Document, error) - FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, error) + Init( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, + reverse bool, + showDeleted bool, + ) error + Start(ctx context.Context, spans core.Spans) error + FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) + FetchNextDecoded(ctx context.Context) (*client.Document, ExecInfo, error) + FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, ExecInfo, error) Close() error } +// keyValue is a KV store response containing the resulting core.Key and byte array value. 
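Since every fetch call now also returns an `ExecInfo`, callers are expected to accumulate the statistics themselves via `Add`. A minimal sketch, assuming a hypothetical `example` package and a fetcher that has already been initialized and started; the nil-document sentinel for exhaustion matches the `DocumentFetcher` behaviour later in this patch:

```go
package example

import (
	"context"

	"github.com/sourcenetwork/defradb/db/fetcher"
)

// countAll drains a fetcher and sums the per-call statistics.
func countAll(ctx context.Context, f fetcher.Fetcher) (fetcher.ExecInfo, error) {
	var total fetcher.ExecInfo
	for {
		doc, info, err := f.FetchNext(ctx)
		if err != nil {
			return total, err
		}
		total.Add(info) // accumulate DocsFetched / FieldsFetched
		if doc == nil {  // nil document with nil error signals exhaustion
			return total, nil
		}
	}
}
```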
+type keyValue struct { + Key core.DataStoreKey + Value []byte +} + var ( _ Fetcher = (*DocumentFetcher)(nil) ) @@ -48,14 +87,35 @@ type DocumentFetcher struct { order []dsq.Order curSpanIndex int - schemaFields map[uint32]client.FieldDescription - fields []*client.FieldDescription + filter *mapper.Filter + ranFilter bool // did we run the filter + passedFilter bool // did we pass the filter + + filterFields map[uint32]client.FieldDescription + selectFields map[uint32]client.FieldDescription + + // static bitset to which stores the IDs of fields + // needed for filtering. + // + // This is compared against the encdoc.filterSet which + // is a dynamic bitset, that gets updated as fields are + // added to the encdoc, and cleared on reset. + // + // We compare the two bitsets to determine if we've collected + // all the necessary fields to run the filter. + // + // This is *much* more effecient for comparison then most (any?) + // other approach. + // + // When proper seek() is added, this will also be responsible + // for effectiently finding the next field to seek to. + filterSet *bitset.BitSet + + doc *encodedDocument - doc *encodedDocument - decodedDoc *client.Document initialized bool - kv *core.KeyValue + kv *keyValue kvIter iterable.Iterator kvResultsIter dsq.Results kvEnd bool @@ -65,20 +125,27 @@ type DocumentFetcher struct { // we use a parallel fetcher to be able to return the documents in the expected order. // That being lexicographically ordered dockeys. deletedDocFetcher *DocumentFetcher + + execInfo ExecInfo } // Init implements DocumentFetcher. func (df *DocumentFetcher) Init( + ctx context.Context, + txn datastore.Txn, col *client.CollectionDescription, - fields []*client.FieldDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, reverse bool, showDeleted bool, ) error { + df.txn = txn if col.Schema.IsEmpty() { return client.NewErrUninitializeProperty("DocumentFetcher", "Schema") } - err := df.init(col, fields, reverse) + err := df.init(col, fields, filter, docmapper, reverse) if err != nil { return err } @@ -86,8 +153,9 @@ func (df *DocumentFetcher) Init( if showDeleted { if df.deletedDocFetcher == nil { df.deletedDocFetcher = new(DocumentFetcher) + df.deletedDocFetcher.txn = txn } - return df.deletedDocFetcher.init(col, fields, reverse) + return df.deletedDocFetcher.init(col, fields, filter, docmapper, reverse) } return nil @@ -95,15 +163,22 @@ func (df *DocumentFetcher) Init( func (df *DocumentFetcher) init( col *client.CollectionDescription, - fields []*client.FieldDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docMapper *core.DocumentMapping, reverse bool, ) error { df.col = col - df.fields = fields df.reverse = reverse df.initialized = true + df.filter = filter df.isReadingDocument = false df.doc = new(encodedDocument) + df.doc.mapping = docMapper + + if df.filter != nil && docMapper == nil { + return ErrMissingMapper + } if df.kvResultsIter != nil { if err := df.kvResultsIter.Close(); err != nil { @@ -118,28 +193,52 @@ func (df *DocumentFetcher) init( } df.kvIter = nil - df.schemaFields = make(map[uint32]client.FieldDescription) - for _, field := range col.Schema.Fields { - df.schemaFields[uint32(field.ID)] = field + df.selectFields = make(map[uint32]client.FieldDescription, len(fields)) + // if we haven't been told to get specific fields + // get them all + var targetFields []client.FieldDescription + if len(fields) == 0 { + targetFields = df.col.Schema.Fields + } else { + 
targetFields = fields + } + + for _, field := range targetFields { + df.selectFields[uint32(field.ID)] = field } + + if df.filter != nil { + conditions := df.filter.ToMap(df.doc.mapping) + parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema) + if err != nil { + return err + } + df.filterFields = make(map[uint32]client.FieldDescription, len(parsedfilterFields)) + df.filterSet = bitset.New(uint(len(col.Schema.Fields))) + for _, field := range parsedfilterFields { + df.filterFields[uint32(field.ID)] = field + df.filterSet.Set(uint(field.ID)) + } + } + return nil } -func (df *DocumentFetcher) Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error { - err := df.start(ctx, txn, spans, false) +func (df *DocumentFetcher) Start(ctx context.Context, spans core.Spans) error { + err := df.start(ctx, spans, false) if err != nil { return err } if df.deletedDocFetcher != nil { - return df.deletedDocFetcher.start(ctx, txn, spans, true) + return df.deletedDocFetcher.start(ctx, spans, true) } return nil } // Start implements DocumentFetcher. -func (df *DocumentFetcher) start(ctx context.Context, txn datastore.Txn, spans core.Spans, withDeleted bool) error { +func (df *DocumentFetcher) start(ctx context.Context, spans core.Spans, withDeleted bool) error { if df.col == nil { return client.NewErrUninitializeProperty("DocumentFetcher", "CollectionDescription") } @@ -176,7 +275,6 @@ func (df *DocumentFetcher) start(ctx context.Context, txn datastore.Txn, spans c } df.curSpanIndex = -1 - df.txn = txn if df.reverse { df.order = []dsq.Order{dsq.OrderByKeyDescending{}} @@ -218,39 +316,36 @@ func (df *DocumentFetcher) startNextSpan(ctx context.Context) (bool, error) { } df.curSpanIndex = nextSpanIndex - _, err = df.nextKey(ctx) + _, _, err = df.nextKey(ctx, false) return err == nil, err } -func (df *DocumentFetcher) KVEnd() bool { - return df.kvEnd -} - -func (df *DocumentFetcher) KV() *core.KeyValue { - return df.kv -} - -func (df *DocumentFetcher) NextKey(ctx context.Context) (docDone bool, err error) { - return df.nextKey(ctx) -} - -func (df *DocumentFetcher) NextKV() (iterDone bool, kv *core.KeyValue, err error) { - return df.nextKV() -} - -func (df *DocumentFetcher) ProcessKV(kv *core.KeyValue) error { - return df.processKV(kv) -} - // nextKey gets the next kv. It sets both kv and kvEnd internally. -// It returns true if the current doc is completed -func (df *DocumentFetcher) nextKey(ctx context.Context) (spanDone bool, err error) { - // get the next kv from nextKV() - spanDone, df.kv, err = df.nextKV() - // handle any internal errors - if err != nil { - return false, err +// It returns true if the current doc is completed. 
+// The first call to nextKey CANNOT have seekNext be true (ErrFailedToSeek) +func (df *DocumentFetcher) nextKey(ctx context.Context, seekNext bool) (spanDone bool, docDone bool, err error) { + // safety against seekNext on first call + if seekNext && df.kv == nil { + return false, false, ErrFailedToSeek + } + + if seekNext { + curKey := df.kv.Key + curKey.FieldId = "" // clear field so prefixEnd applies to dockey + seekKey := curKey.PrefixEnd().ToString() + spanDone, df.kv, err = df.seekKV(seekKey) + // handle any internal errors + if err != nil { + return false, false, err + } + } else { + spanDone, df.kv, err = df.nextKV() + // handle any internal errors + if err != nil { + return false, false, err + } } + if df.kv != nil && (df.kv.Key.InstanceType != core.ValueKey && df.kv.Key.InstanceType != core.DeletedKey) { // We can only ready value values, if we escape the collection's value keys // then we must be done and can stop reading @@ -259,50 +354,103 @@ func (df *DocumentFetcher) nextKey(ctx context.Context) (spanDone bool, err erro df.kvEnd = spanDone if df.kvEnd { - _, err := df.startNextSpan(ctx) + err = df.kvResultsIter.Close() if err != nil { - return false, err + return false, false, err + } + moreSpans, err := df.startNextSpan(ctx) + if err != nil { + return false, false, err } - return true, nil + df.isReadingDocument = false + return !moreSpans, true, nil } // check if we've crossed document boundries - if df.doc.Key != nil && df.kv.Key.DocKey != string(df.doc.Key) { + if (df.doc.key != nil && df.kv.Key.DocKey != string(df.doc.key)) || seekNext { df.isReadingDocument = false - return true, nil + return false, true, nil } - return false, nil + return false, false, nil } // nextKV is a lower-level utility compared to nextKey. The differences are as follows: // - It directly interacts with the KVIterator. // - Returns true if the entire iterator/span is exhausted // - Returns a kv pair instead of internally updating -func (df *DocumentFetcher) nextKV() (iterDone bool, kv *core.KeyValue, err error) { +func (df *DocumentFetcher) nextKV() (iterDone bool, kv *keyValue, err error) { + done, dsKey, res, err := df.nextKVRaw() + if done || err != nil { + return done, nil, err + } + + kv = &keyValue{ + Key: dsKey, + Value: res.Value, + } + return false, kv, nil +} + +// seekKV will seek through results/iterator until it reaches +// the target key, or if the target key doesn't exist, the +// next smallest key that is greater than the target. +func (df *DocumentFetcher) seekKV(key string) (bool, *keyValue, error) { + // make sure the current kv is *before* the target key + switch strings.Compare(df.kv.Key.ToString(), key) { + case 0: + // equal, we should just return the kv state + return df.kvEnd, df.kv, nil + case 1: + // greater, error + return false, nil, NewErrFailedToSeek(key, nil) + } + + for { + done, dsKey, res, err := df.nextKVRaw() + if done || err != nil { + return done, nil, err + } + + switch strings.Compare(dsKey.ToString(), key) { + case -1: + // before, so lets seek again + continue + case 0, 1: + // equal or greater (first), return a formatted kv + kv := &keyValue{ + Key: dsKey, + Value: res.Value, // @todo make lazy + } + return false, kv, nil + } + } +} + +// nextKV is a lower-level utility compared to nextKey. The differences are as follows: +// - It directly interacts with the KVIterator. 
+// - Returns true if the entire iterator/span is exhausted +// - Returns a kv pair instead of internally updating +func (df *DocumentFetcher) nextKVRaw() (bool, core.DataStoreKey, dsq.Result, error) { res, available := df.kvResultsIter.NextSync() if !available { - return true, nil, nil + return true, core.DataStoreKey{}, res, nil } - err = res.Error + err := res.Error if err != nil { - return true, nil, err + return true, core.DataStoreKey{}, res, err } dsKey, err := core.NewDataStoreKey(res.Key) if err != nil { - return true, nil, err + return true, core.DataStoreKey{}, res, err } - kv = &core.KeyValue{ - Key: dsKey, - Value: res.Value, - } - return false, kv, nil + return false, dsKey, res, nil } // processKV continuously processes the key value pairs we've received // and step by step constructs the current encoded document -func (df *DocumentFetcher) processKV(kv *core.KeyValue) error { +func (df *DocumentFetcher) processKV(kv *keyValue) error { // skip MerkleCRDT meta-data priority key-value pair // implement here <-- // instance := kv.Key.Name() @@ -316,7 +464,22 @@ func (df *DocumentFetcher) processKV(kv *core.KeyValue) error { if !df.isReadingDocument { df.isReadingDocument = true df.doc.Reset() - df.doc.Key = []byte(kv.Key.DocKey) + + // re-init doc state + if df.filterSet != nil { + df.doc.filterSet = bitset.New(df.filterSet.Len()) + if df.filterSet.Test(0) { + df.doc.filterSet.Set(0) // mark dockey as set + } + } + df.doc.key = []byte(kv.Key.DocKey) + df.passedFilter = false + df.ranFilter = false + } + + if kv.Key.FieldId == core.DATASTORE_DOC_VERSION_FIELD_ID { + df.doc.schemaVersionID = string(kv.Value) + return nil } // we have to skip the object marker @@ -329,53 +492,110 @@ func (df *DocumentFetcher) processKV(kv *core.KeyValue) error { if err != nil { return err } - fieldDesc, exists := df.schemaFields[fieldID] + fieldDesc, exists := df.selectFields[fieldID] if !exists { - return NewErrFieldIdNotFound(fieldID) + fieldDesc, exists = df.filterFields[fieldID] + if !exists { + return nil // if we can't find this field in our sets, just ignore it + } } - // @todo: Secondary Index might not have encoded FieldIDs - // @body: Need to generalized the processKV, and overall Fetcher architecture - // to better handle dynamic use cases beyond primary indexes. If a - // secondary index is provided, we need to extract the indexed/implicit fields - // from the KV pair. - df.doc.Properties[fieldDesc] = &encProperty{ + ufid := uint(fieldID) + + property := &encProperty{ Desc: fieldDesc, Raw: kv.Value, } - // @todo: Extract Index implicit/stored keys + + if df.filterSet != nil && df.filterSet.Test(ufid) { + df.doc.filterSet.Set(ufid) + property.IsFilter = true + } + + df.execInfo.FieldsFetched++ + + df.doc.Properties[fieldDesc] = property + return nil } // FetchNext returns a raw binary encoded document. It iterates over all the relevant // keypairs from the underlying store and constructs the document. 
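The seek path in `nextKey` builds its target by prefix-ending the current dockey. A small hypothetical helper showing just that step, mirroring the code above:

```go
package example

import "github.com/sourcenetwork/defradb/core"

// nextDocSeekKey mirrors the seek-key construction in nextKey: clearing
// FieldId before PrefixEnd makes the prefix-end apply to the whole dockey,
// so the resulting key sorts after every remaining field of the current
// document and the iterator can jump straight to the next one.
func nextDocSeekKey(cur core.DataStoreKey) string {
	cur.FieldId = "" // prefix over the dockey, not over a single field
	return cur.PrefixEnd().ToString()
}
```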
-func (df *DocumentFetcher) FetchNext(ctx context.Context) (*encodedDocument, error) { +func (df *DocumentFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) { if df.kvEnd { - return nil, nil + return nil, ExecInfo{}, nil } if df.kv == nil { - return nil, client.NewErrUninitializeProperty("DocumentFetcher", "kv") + return nil, ExecInfo{}, client.NewErrUninitializeProperty("DocumentFetcher", "kv") } // save the DocKey of the current kv pair so we can track when we cross the doc pair boundries // keyparts := df.kv.Key.List() // key := keyparts[len(keyparts)-2] + df.execInfo.Reset() // iterate until we have collected all the necessary kv pairs for the doc // we'll know when were done when either // A) Reach the end of the iterator for { err := df.processKV(df.kv) if err != nil { - return nil, err + return nil, ExecInfo{}, err + } + + if df.filter != nil { + // only run filter if we've collected all the fields + // required for filtering. This is tracked by the bitsets. + if df.filterSet.Equal(df.doc.filterSet) { + filterDoc, err := df.doc.decodeToDocForFilter() + if err != nil { + return nil, ExecInfo{}, err + } + + df.ranFilter = true + df.passedFilter, err = mapper.RunFilter(filterDoc, df.filter) + if err != nil { + return nil, ExecInfo{}, err + } + } } - end, err := df.nextKey(ctx) + // if we don't pass the filter (ran and pass) + // theres no point in collecting other select fields + // so we seek to the next doc + spansDone, docDone, err := df.nextKey(ctx, !df.passedFilter && df.ranFilter) if err != nil { - return nil, err + return nil, ExecInfo{}, err } - if end { - return df.doc, nil + + if docDone { + df.execInfo.DocsFetched++ + if df.filter != nil { + // if we passed, return + if df.passedFilter { + return df.doc, df.execInfo, nil + } else if !df.ranFilter { // if we didn't run, run it + decodedDoc, err := df.doc.DecodeToDoc() + if err != nil { + return nil, ExecInfo{}, err + } + df.passedFilter, err = mapper.RunFilter(decodedDoc, df.filter) + if err != nil { + return nil, ExecInfo{}, err + } + if df.passedFilter { + return df.doc, df.execInfo, nil + } + } + } else { + return df.doc, df.execInfo, nil + } + + if !spansDone { + continue + } + + return nil, df.execInfo, nil } // // crossed document kv boundary? @@ -389,21 +609,21 @@ func (df *DocumentFetcher) FetchNext(ctx context.Context) (*encodedDocument, err } // FetchNextDecoded implements DocumentFetcher -func (df *DocumentFetcher) FetchNextDecoded(ctx context.Context) (*client.Document, error) { - encdoc, err := df.FetchNext(ctx) +func (df *DocumentFetcher) FetchNextDecoded(ctx context.Context) (*client.Document, ExecInfo, error) { + encdoc, execInfo, err := df.FetchNext(ctx) if err != nil { - return nil, err + return nil, ExecInfo{}, err } if encdoc == nil { - return nil, nil + return nil, ExecInfo{}, nil } - df.decodedDoc, err = encdoc.Decode() + decodedDoc, err := encdoc.Decode() if err != nil { - return nil, err + return nil, ExecInfo{}, err } - return df.decodedDoc, nil + return decodedDoc, execInfo, nil } // FetchNextDoc returns the next document as a core.Doc. 
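For readers unfamiliar with the bits-and-blooms bitset package that this filter bookkeeping relies on, here is a standalone sketch (the field IDs are made up) of the static-versus-per-document set comparison that decides when the filter can run early:

```go
package main

import (
	"fmt"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	const numFields = 8
	filterSet := bitset.New(numFields) // static set: fields the filter needs
	filterSet.Set(1)
	filterSet.Set(3)

	collected := bitset.New(numFields) // dynamic set: fields seen for this doc
	for _, fieldID := range []uint{0, 3, 1} {
		if filterSet.Test(fieldID) {
			collected.Set(fieldID)
		}
		// Once the two sets are equal, every field the filter depends on has
		// been collected and the filter can be evaluated without decoding the
		// rest of the document.
		if filterSet.Equal(collected) {
			fmt.Println("all filter fields collected after field", fieldID)
			break
		}
	}
}
```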
@@ -411,10 +631,11 @@ func (df *DocumentFetcher) FetchNextDecoded(ctx context.Context) (*client.Docume func (df *DocumentFetcher) FetchNextDoc( ctx context.Context, mapping *core.DocumentMapping, -) ([]byte, core.Doc, error) { +) ([]byte, core.Doc, ExecInfo, error) { var err error - var encdoc *encodedDocument + var encdoc EncodedDocument var status client.DocumentStatus + var resultExecInfo ExecInfo // If the deletedDocFetcher isn't nil, this means that the user requested to include the deleted documents // in the query. To keep the active and deleted docs in lexicographic order of dockeys, we use the two distinct @@ -423,22 +644,16 @@ func (df *DocumentFetcher) FetchNextDoc( if ddf != nil { // If we've reached the end of the deleted docs, we can skip to getting the next active docs. if !ddf.kvEnd { - if df.reverse { - if df.kvEnd || ddf.kv.Key.DocKey > df.kv.Key.DocKey { - encdoc, err = ddf.FetchNext(ctx) - if err != nil { - return nil, core.Doc{}, err - } - status = client.Deleted - } - } else { - if df.kvEnd || ddf.kv.Key.DocKey < df.kv.Key.DocKey { - encdoc, err = ddf.FetchNext(ctx) - if err != nil { - return nil, core.Doc{}, err - } - status = client.Deleted + if df.kvEnd || + (df.reverse && ddf.kv.Key.DocKey > df.kv.Key.DocKey) || + (!df.reverse && ddf.kv.Key.DocKey < df.kv.Key.DocKey) { + var execInfo ExecInfo + encdoc, execInfo, err = ddf.FetchNext(ctx) + if err != nil { + return nil, core.Doc{}, ExecInfo{}, err } + status = client.Deleted + resultExecInfo.Add(execInfo) } } } @@ -446,42 +661,40 @@ func (df *DocumentFetcher) FetchNextDoc( // At this point id encdoc is nil, it means that the next document to be // returned will be from the active ones. if encdoc == nil { - encdoc, err = df.FetchNext(ctx) + var execInfo ExecInfo + encdoc, execInfo, err = df.FetchNext(ctx) if err != nil { - return nil, core.Doc{}, err + return nil, core.Doc{}, ExecInfo{}, err } + resultExecInfo.Add(execInfo) if encdoc == nil { - return nil, core.Doc{}, nil + return nil, core.Doc{}, resultExecInfo, err } status = client.Active } - doc, err := encdoc.DecodeToDoc(mapping) + doc, err := encdoc.DecodeToDoc() if err != nil { - return nil, core.Doc{}, err + return nil, core.Doc{}, ExecInfo{}, err } doc.Status = status - return encdoc.Key, doc, err + return encdoc.Key(), doc, resultExecInfo, err } // Close closes the DocumentFetcher. func (df *DocumentFetcher) Close() error { - if df.kvIter == nil { - return nil - } - - err := df.kvIter.Close() - if err != nil { - return err - } - - if df.kvResultsIter == nil { - return nil + if df.kvIter != nil { + err := df.kvIter.Close() + if err != nil { + return err + } } - err = df.kvResultsIter.Close() - if err != nil { - return err + if df.kvResultsIter != nil { + err := df.kvResultsIter.Close() + if err != nil { + return err + } } if df.deletedDocFetcher != nil { diff --git a/db/fetcher/mocks/EncodedDocument.go b/db/fetcher/mocks/EncodedDocument.go new file mode 100644 index 0000000000..23522ef1f2 --- /dev/null +++ b/db/fetcher/mocks/EncodedDocument.go @@ -0,0 +1,257 @@ +// Code generated by mockery v2.30.1. DO NOT EDIT. 
+ +package mocks + +import ( + client "github.com/sourcenetwork/defradb/client" + core "github.com/sourcenetwork/defradb/core" + + mock "github.com/stretchr/testify/mock" +) + +// EncodedDocument is an autogenerated mock type for the EncodedDocument type +type EncodedDocument struct { + mock.Mock +} + +type EncodedDocument_Expecter struct { + mock *mock.Mock +} + +func (_m *EncodedDocument) EXPECT() *EncodedDocument_Expecter { + return &EncodedDocument_Expecter{mock: &_m.Mock} +} + +// Decode provides a mock function with given fields: +func (_m *EncodedDocument) Decode() (*client.Document, error) { + ret := _m.Called() + + var r0 *client.Document + var r1 error + if rf, ok := ret.Get(0).(func() (*client.Document, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *client.Document); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodedDocument_Decode_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Decode' +type EncodedDocument_Decode_Call struct { + *mock.Call +} + +// Decode is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Decode() *EncodedDocument_Decode_Call { + return &EncodedDocument_Decode_Call{Call: _e.mock.On("Decode")} +} + +func (_c *EncodedDocument_Decode_Call) Run(run func()) *EncodedDocument_Decode_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Decode_Call) Return(_a0 *client.Document, _a1 error) *EncodedDocument_Decode_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EncodedDocument_Decode_Call) RunAndReturn(run func() (*client.Document, error)) *EncodedDocument_Decode_Call { + _c.Call.Return(run) + return _c +} + +// DecodeToDoc provides a mock function with given fields: +func (_m *EncodedDocument) DecodeToDoc() (core.Doc, error) { + ret := _m.Called() + + var r0 core.Doc + var r1 error + if rf, ok := ret.Get(0).(func() (core.Doc, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() core.Doc); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(core.Doc) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodedDocument_DecodeToDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeToDoc' +type EncodedDocument_DecodeToDoc_Call struct { + *mock.Call +} + +// DecodeToDoc is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) DecodeToDoc() *EncodedDocument_DecodeToDoc_Call { + return &EncodedDocument_DecodeToDoc_Call{Call: _e.mock.On("DecodeToDoc")} +} + +func (_c *EncodedDocument_DecodeToDoc_Call) Run(run func()) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_DecodeToDoc_Call) Return(_a0 core.Doc, _a1 error) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EncodedDocument_DecodeToDoc_Call) RunAndReturn(run func() (core.Doc, error)) *EncodedDocument_DecodeToDoc_Call { + _c.Call.Return(run) + return _c +} + +// Key provides a mock function with given fields: +func (_m *EncodedDocument) Key() []byte { + ret := _m.Called() + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + 
+ return r0 +} + +// EncodedDocument_Key_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Key' +type EncodedDocument_Key_Call struct { + *mock.Call +} + +// Key is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Key() *EncodedDocument_Key_Call { + return &EncodedDocument_Key_Call{Call: _e.mock.On("Key")} +} + +func (_c *EncodedDocument_Key_Call) Run(run func()) *EncodedDocument_Key_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Key_Call) Return(_a0 []byte) *EncodedDocument_Key_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EncodedDocument_Key_Call) RunAndReturn(run func() []byte) *EncodedDocument_Key_Call { + _c.Call.Return(run) + return _c +} + +// Reset provides a mock function with given fields: +func (_m *EncodedDocument) Reset() { + _m.Called() +} + +// EncodedDocument_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' +type EncodedDocument_Reset_Call struct { + *mock.Call +} + +// Reset is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) Reset() *EncodedDocument_Reset_Call { + return &EncodedDocument_Reset_Call{Call: _e.mock.On("Reset")} +} + +func (_c *EncodedDocument_Reset_Call) Run(run func()) *EncodedDocument_Reset_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_Reset_Call) Return() *EncodedDocument_Reset_Call { + _c.Call.Return() + return _c +} + +func (_c *EncodedDocument_Reset_Call) RunAndReturn(run func()) *EncodedDocument_Reset_Call { + _c.Call.Return(run) + return _c +} + +// SchemaVersionID provides a mock function with given fields: +func (_m *EncodedDocument) SchemaVersionID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// EncodedDocument_SchemaVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SchemaVersionID' +type EncodedDocument_SchemaVersionID_Call struct { + *mock.Call +} + +// SchemaVersionID is a helper method to define mock.On call +func (_e *EncodedDocument_Expecter) SchemaVersionID() *EncodedDocument_SchemaVersionID_Call { + return &EncodedDocument_SchemaVersionID_Call{Call: _e.mock.On("SchemaVersionID")} +} + +func (_c *EncodedDocument_SchemaVersionID_Call) Run(run func()) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EncodedDocument_SchemaVersionID_Call) Return(_a0 string) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EncodedDocument_SchemaVersionID_Call) RunAndReturn(run func() string) *EncodedDocument_SchemaVersionID_Call { + _c.Call.Return(run) + return _c +} + +// NewEncodedDocument creates a new instance of EncodedDocument. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
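The generated expecter API can be used directly in tests. A minimal sketch with placeholder values; the test package name and the `require` import are assumptions for illustration:

```go
package mocks_test

import (
	"testing"

	"github.com/sourcenetwork/defradb/db/fetcher/mocks"
	"github.com/stretchr/testify/require"
)

func TestEncodedDocumentMock(t *testing.T) {
	// Stub only the calls the test needs; the values are placeholders,
	// not real dockeys or schema version CIDs.
	encdoc := mocks.NewEncodedDocument(t)
	encdoc.EXPECT().Key().Return([]byte("bae-placeholder"))
	encdoc.EXPECT().SchemaVersionID().Return("bafy-placeholder").Maybe()

	require.Equal(t, []byte("bae-placeholder"), encdoc.Key())
}
```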
+func NewEncodedDocument(t interface { + mock.TestingT + Cleanup(func()) +}) *EncodedDocument { + mock := &EncodedDocument{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/db/fetcher/mocks/Fetcher.go b/db/fetcher/mocks/Fetcher.go new file mode 100644 index 0000000000..39f9c89c39 --- /dev/null +++ b/db/fetcher/mocks/Fetcher.go @@ -0,0 +1,370 @@ +// Code generated by mockery v2.32.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "github.com/sourcenetwork/defradb/client" + + core "github.com/sourcenetwork/defradb/core" + + datastore "github.com/sourcenetwork/defradb/datastore" + + fetcher "github.com/sourcenetwork/defradb/db/fetcher" + + mapper "github.com/sourcenetwork/defradb/planner/mapper" + + mock "github.com/stretchr/testify/mock" +) + +// Fetcher is an autogenerated mock type for the Fetcher type +type Fetcher struct { + mock.Mock +} + +type Fetcher_Expecter struct { + mock *mock.Mock +} + +func (_m *Fetcher) EXPECT() *Fetcher_Expecter { + return &Fetcher_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *Fetcher) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type Fetcher_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *Fetcher_Expecter) Close() *Fetcher_Close_Call { + return &Fetcher_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *Fetcher_Close_Call) Run(run func()) *Fetcher_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Fetcher_Close_Call) Return(_a0 error) *Fetcher_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Close_Call) RunAndReturn(run func() error) *Fetcher_Close_Call { + _c.Call.Return(run) + return _c +} + +// FetchNext provides a mock function with given fields: ctx +func (_m *Fetcher) FetchNext(ctx context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error) { + ret := _m.Called(ctx) + + var r0 fetcher.EncodedDocument + var r1 fetcher.ExecInfo + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) fetcher.EncodedDocument); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fetcher.EncodedDocument) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Fetcher_FetchNext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNext' +type Fetcher_FetchNext_Call struct { + *mock.Call +} + +// FetchNext is a helper method to define mock.On call +// - ctx context.Context +func (_e *Fetcher_Expecter) FetchNext(ctx interface{}) *Fetcher_FetchNext_Call { + return &Fetcher_FetchNext_Call{Call: _e.mock.On("FetchNext", ctx)} +} + +func (_c *Fetcher_FetchNext_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNext_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Fetcher_FetchNext_Call) 
Return(_a0 fetcher.EncodedDocument, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNext_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error)) *Fetcher_FetchNext_Call { + _c.Call.Return(run) + return _c +} + +// FetchNextDecoded provides a mock function with given fields: ctx +func (_m *Fetcher) FetchNextDecoded(ctx context.Context) (*client.Document, fetcher.ExecInfo, error) { + ret := _m.Called(ctx) + + var r0 *client.Document + var r1 fetcher.ExecInfo + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (*client.Document, fetcher.ExecInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *client.Document); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*client.Document) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Fetcher_FetchNextDecoded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDecoded' +type Fetcher_FetchNextDecoded_Call struct { + *mock.Call +} + +// FetchNextDecoded is a helper method to define mock.On call +// - ctx context.Context +func (_e *Fetcher_Expecter) FetchNextDecoded(ctx interface{}) *Fetcher_FetchNextDecoded_Call { + return &Fetcher_FetchNextDecoded_Call{Call: _e.mock.On("FetchNextDecoded", ctx)} +} + +func (_c *Fetcher_FetchNextDecoded_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNextDecoded_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Fetcher_FetchNextDecoded_Call) Return(_a0 *client.Document, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNextDecoded_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Fetcher_FetchNextDecoded_Call) RunAndReturn(run func(context.Context) (*client.Document, fetcher.ExecInfo, error)) *Fetcher_FetchNextDecoded_Call { + _c.Call.Return(run) + return _c +} + +// FetchNextDoc provides a mock function with given fields: ctx, mapping +func (_m *Fetcher) FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error) { + ret := _m.Called(ctx, mapping) + + var r0 []byte + var r1 core.Doc + var r2 fetcher.ExecInfo + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)); ok { + return rf(ctx, mapping) + } + if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) []byte); ok { + r0 = rf(ctx, mapping) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *core.DocumentMapping) core.Doc); ok { + r1 = rf(ctx, mapping) + } else { + r1 = ret.Get(1).(core.Doc) + } + + if rf, ok := ret.Get(2).(func(context.Context, *core.DocumentMapping) fetcher.ExecInfo); ok { + r2 = rf(ctx, mapping) + } else { + r2 = ret.Get(2).(fetcher.ExecInfo) + } + + if rf, ok := ret.Get(3).(func(context.Context, *core.DocumentMapping) error); ok { + r3 = rf(ctx, mapping) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// Fetcher_FetchNextDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDoc' +type 
Fetcher_FetchNextDoc_Call struct { + *mock.Call +} + +// FetchNextDoc is a helper method to define mock.On call +// - ctx context.Context +// - mapping *core.DocumentMapping +func (_e *Fetcher_Expecter) FetchNextDoc(ctx interface{}, mapping interface{}) *Fetcher_FetchNextDoc_Call { + return &Fetcher_FetchNextDoc_Call{Call: _e.mock.On("FetchNextDoc", ctx, mapping)} +} + +func (_c *Fetcher_FetchNextDoc_Call) Run(run func(ctx context.Context, mapping *core.DocumentMapping)) *Fetcher_FetchNextDoc_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*core.DocumentMapping)) + }) + return _c +} + +func (_c *Fetcher_FetchNextDoc_Call) Return(_a0 []byte, _a1 core.Doc, _a2 fetcher.ExecInfo, _a3 error) *Fetcher_FetchNextDoc_Call { + _c.Call.Return(_a0, _a1, _a2, _a3) + return _c +} + +func (_c *Fetcher_FetchNextDoc_Call) RunAndReturn(run func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)) *Fetcher_FetchNextDoc_Call { + _c.Call.Return(run) + return _c +} + +// Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted +func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { + ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { + r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' +type Fetcher_Init_Call struct { + *mock.Call +} + +// Init is a helper method to define mock.On call +// - ctx context.Context +// - txn datastore.Txn +// - col *client.CollectionDescription +// - fields []client.FieldDescription +// - filter *mapper.Filter +// - docmapper *core.DocumentMapping +// - reverse bool +// - showDeleted bool +func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface{}, fields interface{}, filter interface{}, docmapper interface{}, reverse interface{}, showDeleted interface{}) *Fetcher_Init_Call { + return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)} +} + +func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(*client.CollectionDescription), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) + }) + return _c +} + +func (_c *Fetcher_Init_Call) Return(_a0 error) *Fetcher_Init_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given 
fields: ctx, spans +func (_m *Fetcher) Start(ctx context.Context, spans core.Spans) error { + ret := _m.Called(ctx, spans) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.Spans) error); ok { + r0 = rf(ctx, spans) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Fetcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type Fetcher_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +// - spans core.Spans +func (_e *Fetcher_Expecter) Start(ctx interface{}, spans interface{}) *Fetcher_Start_Call { + return &Fetcher_Start_Call{Call: _e.mock.On("Start", ctx, spans)} +} + +func (_c *Fetcher_Start_Call) Run(run func(ctx context.Context, spans core.Spans)) *Fetcher_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(core.Spans)) + }) + return _c +} + +func (_c *Fetcher_Start_Call) Return(_a0 error) *Fetcher_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Fetcher_Start_Call) RunAndReturn(run func(context.Context, core.Spans) error) *Fetcher_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewFetcher creates a new instance of Fetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFetcher(t interface { + mock.TestingT + Cleanup(func()) +}) *Fetcher { + mock := &Fetcher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/db/fetcher/mocks/utils.go b/db/fetcher/mocks/utils.go new file mode 100644 index 0000000000..3ffe12fce2 --- /dev/null +++ b/db/fetcher/mocks/utils.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package mocks + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + + "github.com/stretchr/testify/mock" +) + +func NewStubbedFetcher(t *testing.T) *Fetcher { + f := NewFetcher(t) + f.EXPECT().Init( + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Maybe().Return(nil) + f.EXPECT().Start(mock.Anything, mock.Anything).Maybe().Return(nil) + f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, nil) + f.EXPECT().FetchNextDoc(mock.Anything, mock.Anything).Maybe(). + Return(NewEncodedDocument(t), core.Doc{}, nil) + f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(&client.Document{}, nil) + f.EXPECT().Close().Maybe().Return(nil) + return f +} diff --git a/db/fetcher/versioned.go b/db/fetcher/versioned.go index 8fd8e4245c..53ae6b8eaf 100644 --- a/db/fetcher/versioned.go +++ b/db/fetcher/versioned.go @@ -27,6 +27,7 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/merkle/crdt" + "github.com/sourcenetwork/defradb/planner/mapper" ) var ( @@ -98,22 +99,41 @@ type VersionedFetcher struct { // Init initializes the VersionedFetcher. 
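Exercising the generated `Fetcher` mock looks roughly like the following sketch; the test name, package, and nil arguments are placeholders chosen only to satisfy the stubbed expectations, which are verified automatically on `t.Cleanup`:

```go
package fetcher_test

import (
	"context"
	"testing"

	"github.com/sourcenetwork/defradb/core"
	"github.com/sourcenetwork/defradb/db/fetcher/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestFetcherMockLifecycle(t *testing.T) {
	ctx := context.Background()
	f := mocks.NewFetcher(t)

	// Stub the lifecycle methods of the new Fetcher interface.
	f.EXPECT().Init(
		mock.Anything, mock.Anything, mock.Anything, mock.Anything,
		mock.Anything, mock.Anything, mock.Anything, mock.Anything,
	).Return(nil)
	f.EXPECT().Start(mock.Anything, mock.Anything).Return(nil)
	f.EXPECT().Close().Return(nil)

	require.NoError(t, f.Init(ctx, nil, nil, nil, nil, nil, false, false))
	require.NoError(t, f.Start(ctx, core.Spans{}))
	require.NoError(t, f.Close())
}
```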
func (vf *VersionedFetcher) Init( + ctx context.Context, + txn datastore.Txn, col *client.CollectionDescription, - fields []*client.FieldDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, reverse bool, showDeleted bool, ) error { vf.col = col vf.queuedCids = list.New() vf.mCRDTs = make(map[uint32]crdt.MerkleCRDT) + vf.txn = txn + + // create store + root := memory.NewDatastore(ctx) + vf.root = root + + var err error + vf.store, err = datastore.NewTxnFrom( + ctx, + vf.root, + false, + ) // were going to discard and nuke this later + if err != nil { + return err + } // run the DF init, VersionedFetchers only supports the Primary (0) index vf.DocumentFetcher = new(DocumentFetcher) - return vf.DocumentFetcher.Init(col, fields, reverse, showDeleted) + return vf.DocumentFetcher.Init(ctx, vf.store, col, fields, filter, docmapper, reverse, showDeleted) } // Start serializes the correct state according to the Key and CID. -func (vf *VersionedFetcher) Start(ctx context.Context, txn datastore.Txn, spans core.Spans) error { +func (vf *VersionedFetcher) Start(ctx context.Context, spans core.Spans) error { if vf.col == nil { return client.NewErrUninitializeProperty("VersionedFetcher", "CollectionDescription") } @@ -140,29 +160,15 @@ func (vf *VersionedFetcher) Start(ctx context.Context, txn datastore.Txn, spans return NewErrFailedToDecodeCIDForVFetcher(err) } - vf.txn = txn vf.ctx = ctx vf.key = dk vf.version = c - // create store - root := memory.NewDatastore(ctx) - vf.root = root - - vf.store, err = datastore.NewTxnFrom( - ctx, - vf.root, - false, - ) // were going to discard and nuke this later - if err != nil { - return err - } - if err := vf.seekTo(vf.version); err != nil { return NewErrFailedToSeek(c, err) } - return vf.DocumentFetcher.Start(ctx, vf.store, core.Spans{}) + return vf.DocumentFetcher.Start(ctx, core.Spans{}) } // Rootstore returns the rootstore of the VersionedFetcher. 
@@ -191,7 +197,7 @@ func (vf *VersionedFetcher) SeekTo(ctx context.Context, c cid.Cid) error { return err } - return vf.DocumentFetcher.Start(ctx, vf.store, core.Spans{}) + return vf.DocumentFetcher.Start(ctx, core.Spans{}) } // seekTo seeks to the given CID version by stepping through the CRDT state graph from the beginning diff --git a/db/fetcher_test.go b/db/fetcher_test.go index af6613373f..209fb7a8c3 100644 --- a/db/fetcher_test.go +++ b/db/fetcher_test.go @@ -15,9 +15,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" ) @@ -50,10 +52,10 @@ func newTestCollectionDescription() client.CollectionDescription { } } -func newTestFetcher() (*fetcher.DocumentFetcher, error) { +func newTestFetcher(ctx context.Context, txn datastore.Txn) (*fetcher.DocumentFetcher, error) { df := new(fetcher.DocumentFetcher) desc := newTestCollectionDescription() - err := df.Init(&desc, nil, false, false) + err := df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) if err != nil { return nil, err } @@ -61,7 +63,7 @@ func newTestFetcher() (*fetcher.DocumentFetcher, error) { } func TestFetcherInit(t *testing.T) { - _, err := newTestFetcher() + _, err := newTestFetcher(context.Background(), nil) assert.NoError(t, err) } @@ -77,27 +79,17 @@ func TestFetcherStart(t *testing.T) { t.Error(err) return } - df, err := newTestFetcher() + df, err := newTestFetcher(ctx, txn) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) } func TestFetcherStartWithoutInit(t *testing.T) { ctx := context.Background() - db, err := newMemoryDB(ctx) - if err != nil { - t.Error(err) - return - } - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } df := new(fetcher.DocumentFetcher) - err = df.Start(ctx, txn, core.Spans{}) + err := df.Start(ctx, core.Spans{}) assert.Error(t, err) } @@ -133,13 +125,13 @@ func TestFetcherGetAllPrimaryIndexEncodedDocSingle(t *testing.T) { df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - encdoc, err := df.FetchNext(ctx) + encdoc, _, err := df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) } @@ -178,16 +170,16 @@ func TestFetcherGetAllPrimaryIndexEncodedDocMultiple(t *testing.T) { df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) - err = df.Start(ctx, txn, core.Spans{}) + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - encdoc, err := df.FetchNext(ctx) + encdoc, _, err := df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) - encdoc, err = df.FetchNext(ctx) + encdoc, _, err = df.FetchNext(ctx) assert.NoError(t, err) assert.NotNil(t, encdoc) } @@ -208,23 +200,23 @@ func TestFetcherGetAllPrimaryIndexDecodedSingle(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(&desc, nil, false, false) - assert.NoError(t, 
err) - txn, err := db.NewTxn(ctx, true) if err != nil { t.Error(err) return } - err = df.Start(ctx, txn, core.Spans{}) + df := new(fetcher.DocumentFetcher) + desc := col.Description() + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) + assert.NoError(t, err) + + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) - assert.NotNil(t, ddoc) + require.NotNil(t, ddoc) // value check name, err := ddoc.Get("Name") @@ -260,21 +252,21 @@ func TestFetcherGetAllPrimaryIndexDecodedMultiple(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(&desc, nil, false, false) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, true) if err != nil { t.Error(err) return } - err = df.Start(ctx, txn, core.Spans{}) + df := new(fetcher.DocumentFetcher) + desc := col.Description() + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) + assert.NoError(t, err) + + err = df.Start(ctx, core.Spans{}) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) @@ -287,7 +279,7 @@ func TestFetcherGetAllPrimaryIndexDecodedMultiple(t *testing.T) { assert.Equal(t, "John", name) assert.Equal(t, uint64(21), age) - ddoc, err = df.FetchNextDecoded(ctx) + ddoc, _, err = df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) @@ -317,9 +309,15 @@ func TestFetcherGetOnePrimaryIndexDecoded(t *testing.T) { err = col.Save(ctx, doc) assert.NoError(t, err) + txn, err := db.NewTxn(ctx, true) + if err != nil { + t.Error(err) + return + } + df := new(fetcher.DocumentFetcher) desc := col.Description() - err = df.Init(&desc, nil, false, false) + err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) assert.NoError(t, err) // create a span for our document we wish to find @@ -328,16 +326,10 @@ func TestFetcherGetOnePrimaryIndexDecoded(t *testing.T) { core.NewSpan(docKey, docKey.PrefixEnd()), ) - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - err = df.Start(ctx, txn, spans) + err = df.Start(ctx, spans) assert.NoError(t, err) - ddoc, err := df.FetchNextDecoded(ctx) + ddoc, _, err := df.FetchNextDecoded(ctx) assert.NoError(t, err) assert.NotNil(t, ddoc) diff --git a/db/index.go b/db/index.go new file mode 100644 index 0000000000..2c5ea2d6b2 --- /dev/null +++ b/db/index.go @@ -0,0 +1,237 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package db
+
+import (
+	"context"
+	"time"
+
+	ds "github.com/ipfs/go-datastore"
+
+	"github.com/ipfs/go-datastore/query"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/datastore"
+	"github.com/sourcenetwork/defradb/errors"
+)
+
+// CollectionIndex is an interface for collection indexes.
+// It abstracts away common index functionality to be implemented
+// by different index types: non-unique, unique, and composite.
+type CollectionIndex interface {
+	// Save indexes a document by storing it
+	Save(context.Context, datastore.Txn, *client.Document) error
+	// Update updates an existing document in the index
+	Update(context.Context, datastore.Txn, *client.Document, *client.Document) error
+	// RemoveAll removes all documents from the index
+	RemoveAll(context.Context, datastore.Txn) error
+	// Name returns the name of the index
+	Name() string
+	// Description returns the description of the index
+	Description() client.IndexDescription
+}
+
+func canConvertIndexFieldValue[T any](val any) bool {
+	_, ok := val.(T)
+	return ok
+}
+
+func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool {
+	switch kind {
+	case client.FieldKind_STRING:
+		return canConvertIndexFieldValue[string]
+	case client.FieldKind_INT:
+		return canConvertIndexFieldValue[int64]
+	case client.FieldKind_FLOAT:
+		return canConvertIndexFieldValue[float64]
+	case client.FieldKind_BOOL:
+		return canConvertIndexFieldValue[bool]
+	case client.FieldKind_DATETIME:
+		return func(val any) bool {
+			timeStrVal, ok := val.(string)
+			if !ok {
+				return false
+			}
+			_, err := time.Parse(time.RFC3339, timeStrVal)
+			return err == nil
+		}
+	default:
+		return nil
+	}
+}
+
+func getFieldValidateFunc(kind client.FieldKind) (func(any) bool, error) {
+	validateFunc := getValidateIndexFieldFunc(kind)
+	if validateFunc == nil {
+		return nil, NewErrUnsupportedIndexFieldType(kind)
+	}
+	return validateFunc, nil
+}
+
+// NewCollectionIndex creates a new collection index.
+func NewCollectionIndex(
+	collection client.Collection,
+	desc client.IndexDescription,
+) (CollectionIndex, error) {
+	if len(desc.Fields) == 0 {
+		return nil, NewErrIndexDescHasNoFields(desc)
+	}
+	index := &collectionSimpleIndex{collection: collection, desc: desc}
+	schema := collection.Description().Schema
+	fieldID := client.FieldID(schema.GetFieldKey(desc.Fields[0].Name))
+	field, foundField := collection.Description().GetFieldByID(fieldID)
+	if fieldID == client.FieldID(0) || !foundField {
+		return nil, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name)
+	}
+	var e error
+	index.fieldDesc = field
+	index.validateFieldFunc, e = getFieldValidateFunc(field.Kind)
+	return index, e
+}
+
+// collectionSimpleIndex is a non-unique index that indexes documents by a single field.
+// Single-field indexes store values only in ascending order.
+type collectionSimpleIndex struct {
+	collection        client.Collection
+	desc              client.IndexDescription
+	validateFieldFunc func(any) bool
+	fieldDesc         client.FieldDescription
+}
+
+var _ CollectionIndex = (*collectionSimpleIndex)(nil)
+
+func (i *collectionSimpleIndex) getDocumentsIndexKey(
+	doc *client.Document,
+) (core.IndexDataStoreKey, error) {
+	fieldValue, err := i.getDocFieldValue(doc)
+	if err != nil {
+		return core.IndexDataStoreKey{}, err
+	}
+
+	indexDataStoreKey := core.IndexDataStoreKey{}
+	indexDataStoreKey.CollectionID = i.collection.ID()
+	indexDataStoreKey.IndexID = i.desc.ID
+	indexDataStoreKey.FieldValues = [][]byte{fieldValue, []byte(doc.Key().String())}
+	return indexDataStoreKey, nil
+}
+
+func (i *collectionSimpleIndex) getDocFieldValue(doc *client.Document) ([]byte, error) {
+	// collectionSimpleIndex only supports single-field indexes, so we
+	// can safely access the first field.
+	indexedFieldName := i.desc.Fields[0].Name
+	fieldVal, err := doc.GetValue(indexedFieldName)
+	if err != nil {
+		if errors.Is(err, client.ErrFieldNotExist) {
+			return client.NewCBORValue(client.LWW_REGISTER, nil).Bytes()
+		} else {
+			return nil, err
+		}
+	}
+	writeableVal, ok := fieldVal.(client.WriteableValue)
+	if !ok || !i.validateFieldFunc(fieldVal.Value()) {
+		return nil, NewErrInvalidFieldValue(i.fieldDesc.Kind, writeableVal)
+	}
+	return writeableVal.Bytes()
+}
+
+// Save indexes a document by storing the indexed field value.
+func (i *collectionSimpleIndex) Save(
+	ctx context.Context,
+	txn datastore.Txn,
+	doc *client.Document,
+) error {
+	key, err := i.getDocumentsIndexKey(doc)
+	if err != nil {
+		return err
+	}
+	err = txn.Datastore().Put(ctx, key.ToDS(), []byte{})
+	if err != nil {
+		return NewErrFailedToStoreIndexedField(key.ToDS().String(), err)
+	}
+	return nil
+}
+
+// Update updates indexed field values of an existing document.
+// It removes the old document from the index and adds the new one.
+func (i *collectionSimpleIndex) Update(
+	ctx context.Context,
+	txn datastore.Txn,
+	oldDoc *client.Document,
+	newDoc *client.Document,
+) error {
+	key, err := i.getDocumentsIndexKey(oldDoc)
+	if err != nil {
+		return err
+	}
+	err = txn.Datastore().Delete(ctx, key.ToDS())
+	if err != nil {
+		return err
+	}
+	return i.Save(ctx, txn, newDoc)
+}
+
+func fetchKeysForPrefix(
+	ctx context.Context,
+	prefix string,
+	storage ds.Read,
+) ([]ds.Key, error) {
+	q, err := storage.Query(ctx, query.Query{Prefix: prefix})
+	if err != nil {
+		return nil, err
+	}
+
+	keys := make([]ds.Key, 0)
+	for res := range q.Next() {
+		if res.Error != nil {
+			_ = q.Close()
+			return nil, res.Error
+		}
+		keys = append(keys, ds.NewKey(res.Key))
+	}
+	if err = q.Close(); err != nil {
+		return nil, err
+	}
+
+	return keys, nil
+}
+
+// RemoveAll removes all artifacts of the index from the storage, i.e. all index
+// field values for all documents.
+func (i *collectionSimpleIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { + prefixKey := core.IndexDataStoreKey{} + prefixKey.CollectionID = i.collection.ID() + prefixKey.IndexID = i.desc.ID + + keys, err := fetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) + if err != nil { + return err + } + + for _, key := range keys { + err := txn.Datastore().Delete(ctx, key) + if err != nil { + return NewCanNotDeleteIndexedField(err) + } + } + + return nil +} + +// Name returns the name of the index +func (i *collectionSimpleIndex) Name() string { + return i.desc.Name +} + +// Description returns the description of the index +func (i *collectionSimpleIndex) Description() client.IndexDescription { + return i.desc +} diff --git a/db/index_test.go b/db/index_test.go new file mode 100644 index 0000000000..dce7e65bb4 --- /dev/null +++ b/db/index_test.go @@ -0,0 +1,1424 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "testing" + + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/datastore/mocks" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/request/graphql/schema" +) + +const ( + usersColName = "Users" + productsColName = "Products" + + usersNameFieldName = "name" + usersAgeFieldName = "age" + usersWeightFieldName = "weight" + + productsIDFieldName = "id" + productsPriceFieldName = "price" + productsCategoryFieldName = "category" + productsAvailableFieldName = "available" + + testUsersColIndexName = "user_name" + testUsersColIndexAge = "user_age" + testUsersColIndexWeight = "user_weight" + + userColVersionID = "bafkreiefzlx2xsfaxixs24hcqwwqpa3nuqbutkapasymk3d5v4fxa4rlhy" +) + +type indexTestFixture struct { + ctx context.Context + db *implicitTxnDB + txn datastore.Txn + users *collection + t *testing.T +} + +func getUsersCollectionDesc() client.CollectionDescription { + return client.CollectionDescription{ + Name: usersColName, + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: usersNameFieldName, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: usersAgeFieldName, + Kind: client.FieldKind_INT, + Typ: client.LWW_REGISTER, + }, + { + Name: usersWeightFieldName, + Kind: client.FieldKind_FLOAT, + Typ: client.LWW_REGISTER, + }, + }, + }, + } +} + +func getProductsCollectionDesc() client.CollectionDescription { + return client.CollectionDescription{ + Name: productsColName, + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: productsIDFieldName, + Kind: client.FieldKind_INT, + Typ: client.LWW_REGISTER, + }, + { + Name: productsPriceFieldName, + Kind: client.FieldKind_FLOAT, + Typ: client.LWW_REGISTER, + }, + { + 
Name: productsCategoryFieldName, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: productsAvailableFieldName, + Kind: client.FieldKind_BOOL, + Typ: client.LWW_REGISTER, + }, + }, + }, + } +} + +func newIndexTestFixtureBare(t *testing.T) *indexTestFixture { + ctx := context.Background() + db, err := newMemoryDB(ctx) + require.NoError(t, err) + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + return &indexTestFixture{ + ctx: ctx, + db: db, + txn: txn, + t: t, + } +} + +func newIndexTestFixture(t *testing.T) *indexTestFixture { + f := newIndexTestFixtureBare(t) + f.users = f.createCollection(getUsersCollectionDesc()) + return f +} + +func (f *indexTestFixture) createCollectionIndex( + desc client.IndexDescription, +) (client.IndexDescription, error) { + return f.createCollectionIndexFor(f.users.Name(), desc) +} + +func getUsersIndexDescOnName() client.IndexDescription { + return client.IndexDescription{ + Name: testUsersColIndexName, + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } +} + +func getUsersIndexDescOnAge() client.IndexDescription { + return client.IndexDescription{ + Name: testUsersColIndexAge, + Fields: []client.IndexedFieldDescription{ + {Name: usersAgeFieldName, Direction: client.Ascending}, + }, + } +} + +func getUsersIndexDescOnWeight() client.IndexDescription { + return client.IndexDescription{ + Name: testUsersColIndexWeight, + Fields: []client.IndexedFieldDescription{ + {Name: usersWeightFieldName, Direction: client.Ascending}, + }, + } +} + +func getProductsIndexDescOnCategory() client.IndexDescription { + return client.IndexDescription{ + Name: testUsersColIndexAge, + Fields: []client.IndexedFieldDescription{ + {Name: productsCategoryFieldName, Direction: client.Ascending}, + }, + } +} + +func (f *indexTestFixture) createUserCollectionIndexOnName() client.IndexDescription { + newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) + require.NoError(f.t, err) + f.commitTxn() + return newDesc +} + +func (f *indexTestFixture) createUserCollectionIndexOnAge() client.IndexDescription { + newDesc, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnAge()) + require.NoError(f.t, err) + f.commitTxn() + return newDesc +} + +func (f *indexTestFixture) dropIndex(colName, indexName string) error { + return f.db.dropCollectionIndex(f.ctx, f.txn, colName, indexName) +} + +func (f *indexTestFixture) countIndexPrefixes(colName, indexName string) int { + prefix := core.NewCollectionIndexKey(usersColName, indexName) + q, err := f.txn.Systemstore().Query(f.ctx, query.Query{ + Prefix: prefix.ToString(), + }) + assert.NoError(f.t, err) + defer func() { + err := q.Close() + assert.NoError(f.t, err) + }() + + count := 0 + for res := range q.Next() { + if res.Error != nil { + assert.NoError(f.t, err) + } + count++ + } + return count +} + +func (f *indexTestFixture) commitTxn() { + err := f.txn.Commit(f.ctx) + require.NoError(f.t, err) + txn, err := f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + f.txn = txn +} + +func (f *indexTestFixture) createCollectionIndexFor( + collectionName string, + desc client.IndexDescription, +) (client.IndexDescription, error) { + return f.db.createCollectionIndex(f.ctx, f.txn, collectionName, desc) +} + +func (f *indexTestFixture) getAllIndexes() (map[client.CollectionName][]client.IndexDescription, error) { + return f.db.getAllIndexes(f.ctx, f.txn) +} + +func (f *indexTestFixture) 
getCollectionIndexes(colName string) ([]client.IndexDescription, error) { + return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colName) +} + +func (f *indexTestFixture) createCollection( + desc client.CollectionDescription, +) *collection { + col, err := f.db.createCollection(f.ctx, f.txn, desc) + assert.NoError(f.t, err) + err = f.txn.Commit(f.ctx) + assert.NoError(f.t, err) + f.txn, err = f.db.NewTxn(f.ctx, false) + assert.NoError(f.t, err) + return col.(*collection) +} + +func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + _, err := f.createCollectionIndex(client.IndexDescription{ + Name: "some_index_name", + }) + assert.EqualError(t, err, errIndexMissingFields) +} + +func TestCreateIndex_IfIndexDescriptionIDIsNotZero_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + for _, id := range []uint32{1, 20, 999} { + desc := client.IndexDescription{ + Name: "some_index_name", + ID: id, + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, NewErrNonZeroIndexIDProvided(0)) + } +} + +func TestCreateIndex_IfValidInput_CreateIndex(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + resultDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, desc.Name, resultDesc.Name) + assert.Equal(t, desc.Fields, resultDesc.Fields) +} + +func TestCreateIndex_IfFieldNameIsEmpty_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: "", Direction: client.Ascending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.EqualError(t, err, errIndexFieldMissingName) +} + +func TestCreateIndex_IfFieldHasNoDirection_DefaultToAsc(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + newDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, client.Ascending, newDesc.Fields[0].Direction) +} + +func TestCreateIndex_IfNameIsNotSpecified_Generate(t *testing.T) { + f := newIndexTestFixtureBare(t) + colDesc := getUsersCollectionDesc() + const colName = "UsErS" + const fieldName = "NaMe" + colDesc.Name = colName + colDesc.Schema.Name = colName // Which one should we use? 
+ colDesc.Schema.Fields[1].Name = fieldName + f.users = f.createCollection(colDesc) + + desc := client.IndexDescription{ + Name: "", + Fields: []client.IndexedFieldDescription{ + {Name: fieldName, Direction: client.Ascending}, + }, + } + + newDesc, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + assert.Equal(t, colName+"_"+fieldName+"_ASC", newDesc.Name) +} + +func TestCreateIndex_IfSingleFieldInDescOrder_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Descending}, + }, + } + _, err := f.createCollectionIndex(desc) + assert.EqualError(t, err, errIndexSingleFieldWrongDirection) +} + +func TestCreateIndex_IfIndexWithNameAlreadyExists_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + name := "some_index_name" + desc1 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + desc2 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersAgeFieldName}}, + } + _, err := f.createCollectionIndex(desc1) + assert.NoError(t, err) + _, err = f.createCollectionIndex(desc2) + assert.ErrorIs(t, err, NewErrIndexWithNameAlreadyExists(name)) +} + +func TestCreateIndex_IfGeneratedNameMatchesExisting_AddIncrement(t *testing.T) { + f := newIndexTestFixture(t) + + name := usersColName + "_" + usersAgeFieldName + "_ASC" + desc1 := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + desc2 := client.IndexDescription{ + Name: name + "_2", + Fields: []client.IndexedFieldDescription{{Name: usersWeightFieldName}}, + } + desc3 := client.IndexDescription{ + Name: "", + Fields: []client.IndexedFieldDescription{{Name: usersAgeFieldName}}, + } + _, err := f.createCollectionIndex(desc1) + assert.NoError(t, err) + _, err = f.createCollectionIndex(desc2) + assert.NoError(t, err) + newDesc3, err := f.createCollectionIndex(desc3) + assert.NoError(t, err) + assert.Equal(t, name+"_3", newDesc3.Name) +} + +func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { + f := newIndexTestFixture(t) + + name := "users_age_ASC" + desc := client.IndexDescription{ + Name: name, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndex(desc) + assert.NoError(t, err) + + key := core.NewCollectionIndexKey(f.users.Name(), name) + data, err := f.txn.Systemstore().Get(f.ctx, key.ToDS()) + assert.NoError(t, err) + var deserialized client.IndexDescription + err = json.Unmarshal(data, &deserialized) + assert.NoError(t, err) + desc.ID = 1 + assert.Equal(t, desc, deserialized) +} + +func TestCreateIndex_IfStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + AlterDescription func(desc *client.IndexDescription) + }{ + { + Name: "call Has() for custom index name", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Has(mock.Anything, mock.Anything).Unset() + store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) {}, + }, + { + Name: "call Has() for generated index name", + ExpectedError: testErr, + GetMockSystemstore: func(t 
*testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Has(mock.Anything, mock.Anything).Unset() + store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) { + desc.Name = "" + }, + }, + { + Name: "fails to store index description", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Unset() + key := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) + store.EXPECT().Put(mock.Anything, key.ToDS(), mock.Anything).Return(testErr) + return store + }, + AlterDescription: func(desc *client.IndexDescription) {}, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + f.stubSystemStore(mockedTxn.MockSystemstore.EXPECT()) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + desc := client.IndexDescription{ + Name: testUsersColIndexName, + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + testCase.AlterDescription(&desc) + + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, testErr, testCase.Name) + } +} + +func TestCreateIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + + _, err := f.createCollectionIndexFor(productsColName, desc) + assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) +} + +func TestCreateIndex_IfPropertyDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + const field = "non_existing_field" + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{{Name: field}}, + } + + _, err := f.createCollectionIndex(desc) + assert.ErrorIs(t, err, NewErrNonExistingFieldForIndex(field)) +} + +func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCollection(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + products := f.createCollection(getProductsCollectionDesc()) + + makeIndex := func(fieldName string) client.IndexDescription { + return client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: fieldName, Direction: client.Ascending}, + }, + } + } + + createIndexAndAssert := func(col client.Collection, fieldName string, expectedID uint32) { + desc, err := f.createCollectionIndexFor(col.Name(), makeIndex(fieldName)) + require.NoError(t, err) + assert.Equal(t, expectedID, desc.ID) + seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, col.ID())) + storedSeqKey, err := f.txn.Systemstore().Get(f.ctx, seqKey.ToDS()) + assert.NoError(t, err) + storedSeqVal := binary.BigEndian.Uint64(storedSeqKey) + assert.Equal(t, expectedID, uint32(storedSeqVal)) + } + + createIndexAndAssert(users, usersNameFieldName, 1) + createIndexAndAssert(users, usersAgeFieldName, 2) + createIndexAndAssert(products, productsIDFieldName, 1) + createIndexAndAssert(products, productsCategoryFieldName, 2) +} + +func TestCreateIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedRootStore := 
mocks.NewRootStore(t) + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + f.db.rootstore = mockedRootStore + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(t, err, testErr) +} + +func TestCreateIndex_IfProvideInvalidIndexName_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexDesc := getUsersIndexDescOnName() + indexDesc.Name = "!" + _, err := f.users.CreateIndex(f.ctx, indexDesc) + require.ErrorIs(t, err, schema.NewErrIndexWithInvalidName(indexDesc.Name)) +} + +func TestCreateIndex_ShouldUpdateCollectionsDescription(t *testing.T) { + f := newIndexTestFixture(t) + + indOnName, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnName}, f.users.Description().Indexes) + + indOnAge, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnName, indOnAge}, + f.users.Description().Indexes) +} + +func TestCreateIndex_NewCollectionDescription_ShouldIncludeIndexDescription(t *testing.T) { + f := newIndexTestFixture(t) + + _, err := f.createCollectionIndex(getUsersIndexDescOnName()) + require.NoError(t, err) + + desc := getUsersIndexDescOnAge() + desc.Name = "" + _, err = f.createCollectionIndex(desc) + require.NoError(t, err) + + cols, err := f.db.getAllCollections(f.ctx, f.txn) + require.NoError(t, err) + + require.Equal(t, 1, len(cols)) + col := cols[0] + require.Equal(t, 2, len(col.Description().Indexes)) + require.NotEmpty(t, col.Description().Indexes[0].Name) + require.NotEmpty(t, col.Description().Indexes[1].Name) +} + +func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) { + f := newIndexTestFixtureBare(t) + + const unsupportedKind = client.FieldKind_BOOL_ARRAY + + desc := client.CollectionDescription{ + Name: "testTypeCol", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: unsupportedKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.ErrorIs(f.t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) + f.commitTxn() +} + +func TestCreateIndex_IfFailedToReadIndexUponRetrievingCollectionDesc_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn().ClearSystemStore() + onSystemStore := mockedTxn.MockSystemstore.EXPECT() + + colIndexKey := core.NewCollectionIndexKey(f.users.Description().Name, "") + matchPrefixFunc := func(q query.Query) bool { + res := q.Prefix == colIndexKey.ToDS().String() + return res + } + + onSystemStore.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Return(nil, testErr) + + descData, err := json.Marshal(getUsersCollectionDesc()) + require.NoError(t, err) + + onSystemStore.Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, []byte("schemaID")), nil) + onSystemStore.Get(mock.Anything, mock.Anything).Unset() + onSystemStore.Get(mock.Anything, mock.Anything).Return(descData, nil) + + f.stubSystemStore(onSystemStore) + + _, err = f.db.getAllCollections(f.ctx, f.txn) + require.ErrorIs(t, err, testErr) +} + +func TestGetIndexes_ShouldReturnListOfAllExistingIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + usersIndexDesc := client.IndexDescription{ + Name: "users_name_index", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) + assert.NoError(t, err) + + f.createCollection(getProductsCollectionDesc()) + productsIndexDesc := client.IndexDescription{ + Name: "products_description_index", + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) + assert.NoError(t, err) + + indexes, err := f.getAllIndexes() + assert.NoError(t, err) + + require.Equal(t, 2, len(indexes)) + + assert.Equal(t, 1, len(indexes[usersColName])) + assert.Equal(t, usersIndexDesc.Name, indexes[usersColName][0].Name) + assert.Equal(t, 1, len(indexes[productsColName])) + assert.Equal(t, productsIndexDesc.Name, indexes[productsColName][0].Name) +} + +func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) + assert.NoError(t, err) + + _, err = f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + key := ds.NewKey(indexKey.ToString() + "/invalid") + desc := client.IndexDescription{ + Name: "some_index_name", + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + } + descData, _ := json.Marshal(desc) + err := f.txn.Systemstore().Put(f.ctx, key, descData) + assert.NoError(t, err) + + _, err = f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndexKey(key.String())) +} + +func TestGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(nil, errors.New("test error")) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) +} + +func TestGetIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithValues(t) + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, _ = f.getAllIndexes() +} + +func TestGetIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, testErr) +} + +func TestGetIndexes_IfSystemStoreHasInvalidData_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithValues(t, []byte("invalid")) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + + _, err := f.getAllIndexes() + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestGetIndexes_IfFailsToReadSeqNumber_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + StubSystemStore func(*mocks.DSReaderWriter_Expecter, core.Key) + }{ + { + Name: "Read Sequence Number", + StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { + onSystemStore.Get(mock.Anything, seqKey.ToDS()).Return(nil, testErr) + }, + }, + { + Name: "Increment Sequence Number", + StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { + onSystemStore.Put(mock.Anything, seqKey.ToDS(), mock.Anything).Return(testErr) + }, + }, + } + + for _, tc := range testCases { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + onSystemStore := mockedTxn.MockSystemstore.EXPECT() + f.resetSystemStoreStubs(onSystemStore) + + seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) + tc.StubSystemStore(onSystemStore, seqKey) + f.stubSystemStore(onSystemStore) + + _, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) + assert.ErrorIs(t, err, testErr) + } +} + +func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + usersIndexDesc := client.IndexDescription{ + Name: "users_name_index", + Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, + } + _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) + assert.NoError(t, err) + + f.createCollection(getProductsCollectionDesc()) + productsIndexDesc := client.IndexDescription{ + Name: "products_description_index", + Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, + } + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) + assert.NoError(t, err) + + userIndexes, err := f.getCollectionIndexes(usersColName) + assert.NoError(t, err) + 
require.Equal(t, 1, len(userIndexes)) + usersIndexDesc.ID = 1 + assert.Equal(t, usersIndexDesc, userIndexes[0]) + + productIndexes, err := f.getCollectionIndexes(productsColName) + assert.NoError(t, err) + require.Equal(t, 1, len(productIndexes)) + productsIndexDesc.ID = 1 + assert.Equal(t, productsIndexDesc, productIndexes[0]) +} + +func TestGetCollectionIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). + Return(nil, errors.New("test error")) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, err := f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) +} + +func TestGetCollectionIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { + f := newIndexTestFixture(t) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + query := mocks.NewQueryResultsWithValues(t) + query.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(query, nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, _ = f.getCollectionIndexes(usersColName) +} + +func TestGetCollectionIndexes_IfSystemStoreQueryIteratorFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + + _, err := f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, testErr) +} + +func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + indexKey := core.NewCollectionIndexKey(usersColName, "users_name_index") + err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) + assert.NoError(t, err) + + _, err = f.getCollectionIndexes(usersColName) + assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestCollectionGetIndexes_ShouldReturnIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + + require.Equal(t, 1, len(indexes)) + assert.Equal(t, testUsersColIndexName, indexes[0].Name) +} + +func TestCollectionGetIndexes_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(f.t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + queryResults := mocks.NewQueryResultsWithValues(f.t) + queryResults.EXPECT().Close().Unset() + queryResults.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(queryResults, nil) + + _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx) + assert.NoError(t, err) +} + +func TestCollectionGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + }{ + { + Name: "Query fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything).Unset() + store.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + return store + }, + }, + { + Name: "Query iterator fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + return store + }, + }, + { + Name: "Query iterator returns invalid value", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) + return store + }, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx) + require.ErrorIs(t, err, testCase.ExpectedError) + } +} + +func TestCollectionGetIndexes_IfFailsToCreateTxn_ShouldNotCache(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + workingRootStore := f.db.rootstore + mockedRootStore := mocks.NewRootStore(t) + f.db.rootstore = mockedRootStore + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + + _, err := f.users.GetIndexes(f.ctx) + require.ErrorIs(t, err, testErr) + + f.db.rootstore = workingRootStore + + indexes, err := f.users.GetIndexes(f.ctx) + require.NoError(t, err) + + require.Equal(t, 1, len(indexes)) + assert.Equal(t, testUsersColIndexName, indexes[0].Name) +} + +func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *testing.T) { + f := newIndexTestFixtureBare(t) + + const unsupportedKind = client.FieldKind_BOOL_ARRAY + + desc := client.CollectionDescription{ + Name: "testTypeCol", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: unsupportedKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + indexDescData, err := json.Marshal(indexDesc) + require.NoError(t, err) + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, indexDescData), nil) + + _, err = collection.WithTxn(mockedTxn).GetIndexes(f.ctx) + require.ErrorIs(t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) +} + +func TestCollectionGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + require.Len(t, indexes, 2) + require.ElementsMatch(t, + []string{testUsersColIndexName, testUsersColIndexAge}, + []string{indexes[0].Name, indexes[1].Name}, + ) + require.ElementsMatch(t, []uint32{1, 2}, []uint32{indexes[0].ID, indexes[1].ID}) +} + +func TestCollectionGetIndexes_IfIndexIsCreated_ReturnUpdateIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 1) + + _, err = f.users.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 2) +} + +func TestCollectionGetIndexes_IfIndexIsDropped_ReturnUpdateIndexes(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + indexes, err := f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 2) + + err = f.users.DropIndex(f.ctx, testUsersColIndexName) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 1) + assert.Equal(t, indexes[0].Name, testUsersColIndexAge) + + err = f.users.DropIndex(f.ctx, testUsersColIndexAge) + assert.NoError(t, err) + + indexes, err = f.users.GetIndexes(f.ctx) + assert.NoError(t, err) + assert.Len(t, indexes, 0) +} + +func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { + f := newIndexTestFixtureBare(t) + colDesc := client.CollectionDescription{ + Name: "testCollection", + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + } + const ( + num = 30 + fieldNamePrefix = "field_" + indexNamePrefix = "index_" + ) + + toSuffix := func(i int) string { + return fmt.Sprintf("%02d", i) + } + + for i := 1; i <= num; i++ { + colDesc.Schema.Fields = append(colDesc.Schema.Fields, + client.FieldDescription{ + Name: fieldNamePrefix + toSuffix(i), + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }) + } + + collection := f.createCollection(colDesc) + + for i := 1; i <= num; i++ { + iStr := toSuffix(i) + indexDesc := client.IndexDescription{ + Name: indexNamePrefix + iStr, + Fields: []client.IndexedFieldDescription{ + {Name: fieldNamePrefix + iStr, Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.NoError(t, err) + } + f.commitTxn() + + indexes, err := collection.GetIndexes(f.ctx) + require.NoError(t, err) + require.Len(t, indexes, num) + + for i := 1; i <= num; i++ { + assert.Equal(t, indexNamePrefix+toSuffix(i), indexes[i-1].Name, "i = %d", i) + } +} + +func TestDropIndex_ShouldDeleteIndex(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + err := f.dropIndex(usersColName, desc.Name) + assert.NoError(t, err) + + indexKey := core.NewCollectionIndexKey(usersColName, desc.Name) + _, err = f.txn.Systemstore().Get(f.ctx, indexKey.ToDS()) + 
assert.Error(t, err) +} + +func TestDropIndex_IfStorageFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + f.db.Close(f.ctx) + + err := f.dropIndex(productsColName, desc.Name) + assert.Error(t, err) +} + +func TestDropIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + err := f.dropIndex(productsColName, "any_name") + assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) +} + +func TestDropIndex_IfFailsToQuerySystemStorage_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything).Return(nil, testErr) + f.stubSystemStore(systemStoreOn) + + err := f.dropIndex(usersColName, desc.Name) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockedRootStore := mocks.NewRootStore(t) + mockedRootStore.EXPECT().NewTransaction(mock.Anything, mock.Anything).Return(nil, testErr) + f.db.rootstore = mockedRootStore + + err := f.users.DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_IfFailsToDeleteFromStorage_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockedTxn.MockSystemstore.EXPECT() + systemStoreOn.Delete(mock.Anything, mock.Anything).Return(testErr) + f.stubSystemStore(systemStoreOn) + mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything).Maybe(). 
+ Return(mocks.NewQueryResultsWithValues(t), nil) + + err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropIndex_ShouldUpdateCollectionsDescription(t *testing.T) { + f := newIndexTestFixture(t) + col := f.users.WithTxn(f.txn) + _, err := col.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.NoError(t, err) + indOnAge, err := col.CreateIndex(f.ctx, getUsersIndexDescOnAge()) + require.NoError(t, err) + f.commitTxn() + + err = f.users.DropIndex(f.ctx, testUsersColIndexName) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{indOnAge}, + f.users.Description().Indexes) + + err = f.users.DropIndex(f.ctx, testUsersColIndexAge) + require.NoError(t, err) + + assert.ElementsMatch(t, []client.IndexDescription{}, f.users.Description().Indexes) +} + +func TestDropIndex_IfIndexWithNameDoesNotExist_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + const name = "not_existing_index" + err := f.users.DropIndex(f.ctx, name) + require.ErrorIs(t, err, NewErrIndexWithNameDoesNotExists(name)) +} + +func TestDropIndex_IfSystemStoreFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + f := newIndexTestFixture(t) + + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName) + require.ErrorIs(t, err, testErr) +} + +func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { + f := newIndexTestFixture(t) + _, err := f.createCollectionIndexFor(usersColName, client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersNameFieldName, Direction: client.Ascending}, + }, + }) + assert.NoError(f.t, err) + + _, err = f.createCollectionIndexFor(usersColName, client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: usersAgeFieldName, Direction: client.Ascending}, + }, + }) + assert.NoError(f.t, err) + + assert.Equal(t, 2, f.countIndexPrefixes(usersColName, "")) + + err = f.users.dropAllIndexes(f.ctx, f.txn) + assert.NoError(t, err) + + assert.Equal(t, 0, f.countIndexPrefixes(usersColName, "")) +} + +func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + f.db.Close(f.ctx) + + err := f.users.dropAllIndexes(f.ctx, f.txn) + assert.Error(t, err) +} + +func TestDropAllIndexes_IfSystemStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + Name string + ExpectedError error + GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter + }{ + { + Name: "Query fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything).Unset() + store.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + return store + }, + }, + { + Name: "Query iterator fails", + ExpectedError: testErr, + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}), nil) + return store + }, + }, + { + Name: "System storage fails to delete", + ExpectedError: NewErrInvalidStoredIndex(nil), + GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { + store := mocks.NewDSReaderWriter(t) + store.EXPECT().Query(mock.Anything, mock.Anything). + Return(mocks.NewQueryResultsWithValues(t, []byte{}), nil) + store.EXPECT().Delete(mock.Anything, mock.Anything).Maybe().Return(testErr) + return store + }, + }, + } + + for _, testCase := range testCases { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + err := f.users.dropAllIndexes(f.ctx, f.txn) + assert.ErrorIs(t, err, testErr, testCase.Name) + } +} + +func TestDropAllIndexes_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + q := mocks.NewQueryResultsWithValues(t, []byte{}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) + mockedTxn.MockSystemstore.EXPECT().Delete(mock.Anything, mock.Anything).Maybe().Return(nil) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() + + _ = f.users.dropAllIndexes(f.ctx, f.txn) +} + +func TestNewCollectionIndex_IfDescriptionHasNoFields_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := getUsersIndexDescOnName() + desc.Fields = nil + _, err := NewCollectionIndex(f.users, desc) + require.ErrorIs(t, err, NewErrIndexDescHasNoFields(desc)) +} + +func TestNewCollectionIndex_IfDescriptionHasNonExistingField_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + desc := getUsersIndexDescOnName() + desc.Fields[0].Name = "non_existing_field" + _, err := NewCollectionIndex(f.users, desc) + require.ErrorIs(t, err, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name)) +} diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go new file mode 100644 index 0000000000..2c89d5f472 --- /dev/null +++ b/db/indexed_docs_test.go @@ -0,0 +1,1049 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package db
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ ipfsDatastore "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/sourcenetwork/defradb/client"
+ "github.com/sourcenetwork/defradb/core"
+ "github.com/sourcenetwork/defradb/datastore"
+ "github.com/sourcenetwork/defradb/datastore/mocks"
+ "github.com/sourcenetwork/defradb/db/fetcher"
+ fetcherMocks "github.com/sourcenetwork/defradb/db/fetcher/mocks"
+ "github.com/sourcenetwork/defradb/planner/mapper"
+)
+
+type userDoc struct {
+ Name string `json:"name"`
+ Age int `json:"age"`
+ Weight float64 `json:"weight"`
+}
+
+type productDoc struct {
+ ID int `json:"id"`
+ Price float64 `json:"price"`
+ Category string `json:"category"`
+ Available bool `json:"available"`
+}
+
+func (f *indexTestFixture) saveDocToCollection(doc *client.Document, col client.Collection) {
+ err := col.Create(f.ctx, doc)
+ require.NoError(f.t, err)
+ f.txn, err = f.db.NewTxn(f.ctx, false)
+ require.NoError(f.t, err)
+}
+
+func (f *indexTestFixture) newUserDoc(name string, age int) *client.Document {
+ d := userDoc{Name: name, Age: age, Weight: 154.1}
+ data, err := json.Marshal(d)
+ require.NoError(f.t, err)
+
+ doc, err := client.NewDocFromJSON(data)
+ require.NoError(f.t, err)
+ return doc
+}
+
+func (f *indexTestFixture) newProdDoc(id int, price float64, cat string) *client.Document {
+ d := productDoc{ID: id, Price: price, Category: cat}
+ data, err := json.Marshal(d)
+ require.NoError(f.t, err)
+
+ doc, err := client.NewDocFromJSON(data)
+ require.NoError(f.t, err)
+ return doc
+}
+
+// indexKeyBuilder is a helper for building index keys that can be turned into a string.
+// The format of the non-unique index key is: "/<collection_id>/<index_id>/<field_value>/<doc_key>"
+// Example: "/5/1/12/bae-61cd6879-63ca-5ca9-8731-470a3c1dac69"
+type indexKeyBuilder struct {
+ f *indexTestFixture
+ colName string
+ fieldName string
+ doc *client.Document
+ values [][]byte
+ isUnique bool
+}
+
+func newIndexKeyBuilder(f *indexTestFixture) *indexKeyBuilder {
+ return &indexKeyBuilder{f: f}
+}
+
+func (b *indexKeyBuilder) Col(colName string) *indexKeyBuilder {
+ b.colName = colName
+ return b
+}
+
+// Field sets the field name for the index key.
+// If the field name is not set, the index key will contain only the collection id.
+// When building a key, it will look up the field id to use in the key.
+func (b *indexKeyBuilder) Field(fieldName string) *indexKeyBuilder {
+ b.fieldName = fieldName
+ return b
+}
+
+// Doc sets the document for the index key.
+// For non-unique index keys, it will try to find the field value in the document
+// corresponding to the field name set in the builder.
+// As the last value in the index key, it will use the document id.
+func (b *indexKeyBuilder) Doc(doc *client.Document) *indexKeyBuilder {
+ b.doc = doc
+ return b
+}
+
+// Values sets the values for the index key.
+// It will override the field values stored in the document.
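+// Tests use this, for example, to build the expected key for a document whose
+// indexed field is nil.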
+func (b *indexKeyBuilder) Values(values ...[]byte) *indexKeyBuilder { + b.values = values + return b +} + +func (b *indexKeyBuilder) Unique() *indexKeyBuilder { + b.isUnique = true + return b +} + +func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { + key := core.IndexDataStoreKey{} + + if b.colName == "" { + return key + } + + cols, err := b.f.db.getAllCollections(b.f.ctx, b.f.txn) + require.NoError(b.f.t, err) + var collection client.Collection + for _, col := range cols { + if col.Name() == b.colName { + collection = col + break + } + } + if collection == nil { + panic(errors.New("collection not found")) + } + key.CollectionID = collection.ID() + + if b.fieldName == "" { + return key + } + + indexes, err := collection.GetIndexes(b.f.ctx) + require.NoError(b.f.t, err) + for _, index := range indexes { + if index.Fields[0].Name == b.fieldName { + key.IndexID = index.ID + break + } + } + + if b.doc != nil { + var fieldBytesVal []byte + var writeableVal client.WriteableValue + if len(b.values) == 0 { + fieldVal, err := b.doc.GetValue(b.fieldName) + require.NoError(b.f.t, err) + var ok bool + writeableVal, ok = fieldVal.(client.WriteableValue) + require.True(b.f.t, ok) + } else { + writeableVal = client.NewCBORValue(client.LWW_REGISTER, b.values[0]) + } + fieldBytesVal, err = writeableVal.Bytes() + require.NoError(b.f.t, err) + + key.FieldValues = [][]byte{fieldBytesVal, []byte(b.doc.Key().String())} + } else if len(b.values) > 0 { + key.FieldValues = b.values + } + + return key +} + +func (f *indexTestFixture) getPrefixFromDataStore(prefix string) [][]byte { + q := query.Query{Prefix: prefix} + res, err := f.txn.Datastore().Query(f.ctx, q) + require.NoError(f.t, err) + + var keys [][]byte + for r := range res.Next() { + keys = append(keys, r.Entry.Value) + } + return keys +} + +func (f *indexTestFixture) mockTxn() *mocks.MultiStoreTxn { + mockedTxn := mocks.NewTxnWithMultistore(f.t) + + systemStoreOn := mockedTxn.MockSystemstore.EXPECT() + f.resetSystemStoreStubs(systemStoreOn) + f.stubSystemStore(systemStoreOn) + + f.txn = mockedTxn + return mockedTxn +} + +func (*indexTestFixture) resetSystemStoreStubs(systemStoreOn *mocks.DSReaderWriter_Expecter) { + systemStoreOn.Query(mock.Anything, mock.Anything).Unset() + systemStoreOn.Get(mock.Anything, mock.Anything).Unset() + systemStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Unset() +} + +func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_Expecter) { + desc := getUsersIndexDescOnName() + desc.ID = 1 + indexOnNameDescData, err := json.Marshal(desc) + require.NoError(f.t, err) + + colIndexKey := core.NewCollectionIndexKey(usersColName, "") + matchPrefixFunc := func(q query.Query) bool { + return q.Prefix == colIndexKey.ToDS().String() + } + + systemStoreOn.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)). + RunAndReturn(func(context.Context, query.Query) (query.Results, error) { + return mocks.NewQueryResultsWithValues(f.t, indexOnNameDescData), nil + }).Maybe() + systemStoreOn.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Maybe(). + Return(mocks.NewQueryResultsWithValues(f.t, indexOnNameDescData), nil) + systemStoreOn.Query(mock.Anything, mock.Anything).Maybe(). 
+ Return(mocks.NewQueryResultsWithValues(f.t), nil) + + colKey := core.NewCollectionKey(usersColName) + systemStoreOn.Get(mock.Anything, colKey.ToDS()).Maybe().Return([]byte(userColVersionID), nil) + + colVersionIDKey := core.NewCollectionSchemaVersionKey(userColVersionID) + colDesc := getUsersCollectionDesc() + colDesc.ID = 1 + for i := range colDesc.Schema.Fields { + colDesc.Schema.Fields[i].ID = client.FieldID(i) + } + colDescBytes, err := json.Marshal(colDesc) + require.NoError(f.t, err) + systemStoreOn.Get(mock.Anything, colVersionIDKey.ToDS()).Maybe().Return(colDescBytes, nil) + + colIndexOnNameKey := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) + systemStoreOn.Get(mock.Anything, colIndexOnNameKey.ToDS()).Maybe().Return(indexOnNameDescData, nil) + + if f.users != nil { + sequenceKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) + systemStoreOn.Get(mock.Anything, sequenceKey.ToDS()).Maybe().Return([]byte{0, 0, 0, 0, 0, 0, 0, 1}, nil) + } + + systemStoreOn.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + + systemStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil) + + systemStoreOn.Has(mock.Anything, mock.Anything).Maybe().Return(false, nil) + + systemStoreOn.Delete(mock.Anything, mock.Anything).Maybe().Return(nil) +} + +func TestNonUnique_IfDocIsAdded_ShouldBeIndexed(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfFailsToStoredIndexedDoc_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + mockTxn := f.mockTxn() + + dataStoreOn := mockTxn.MockDatastore.EXPECT() + dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Unset() + dataStoreOn.Put(mock.Anything, key.ToDS(), mock.Anything).Return(errors.New("error")) + dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(f.t, err, NewErrFailedToStoreIndexedField("name", nil)) +} + +func TestNonUnique_IfDocDoesNotHaveIndexedField_SkipIndex(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + data, err := json.Marshal(struct { + Age int `json:"age"` + Weight float64 `json:"weight"` + }{Age: 21, Weight: 154.1}) + require.NoError(f.t, err) + + doc, err := client.NewDocFromJSON(data) + require.NoError(f.t, err) + + err = f.users.Create(f.ctx, doc) + require.NoError(f.t, err) + + key := newIndexKeyBuilder(f).Col(usersColName).Build() + prefixes := f.getPrefixFromDataStore(key.ToString()) + assert.Len(t, prefixes, 0) +} + +func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything). 
+ Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) +} + +func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + + testErr := errors.New("test error") + + mockTxn := f.mockTxn().ClearSystemStore() + systemStoreOn := mockTxn.MockSystemstore.EXPECT() + systemStoreOn.Query(mock.Anything, mock.Anything). + Return(nil, testErr) + + err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) + require.ErrorIs(t, err, testErr) +} + +func TestNonUnique_IfIndexIntField_StoreIt(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnAge() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + products := f.createCollection(getProductsCollectionDesc()) + + _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + require.NoError(f.t, err) + f.commitTxn() + + userDoc := f.newUserDoc("John", 21) + prodDoc := f.newProdDoc(1, 3, "games") + + err = users.Create(f.ctx, userDoc) + require.NoError(f.t, err) + err = products.Create(f.ctx, prodDoc) + require.NoError(f.t, err) + f.commitTxn() + + userDocKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(userDoc).Build() + prodDocKey := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Doc(prodDoc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, userDocKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, prodDocKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_IfMultipleIndexes_StoreIndexWithIndexID(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + nameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + ageKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + + data, err := f.txn.Datastore().Get(f.ctx, nameKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, ageKey.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { + f := newIndexTestFixtureBare(t) + + now := time.Now() + nowStr := now.Format(time.RFC3339) + + testCase := []struct { + Name string + FieldKind client.FieldKind + // FieldVal is the value the index will receive for serialization + FieldVal any + ShouldFail bool + }{ + {Name: "invalid int", FieldKind: client.FieldKind_INT, FieldVal: "invalid", ShouldFail: true}, + {Name: "invalid float", FieldKind: client.FieldKind_FLOAT, FieldVal: "invalid", ShouldFail: true}, + {Name: "invalid bool", FieldKind: client.FieldKind_BOOL, FieldVal: 
"invalid", ShouldFail: true}, + {Name: "invalid datetime", FieldKind: client.FieldKind_DATETIME, FieldVal: nowStr[1:], ShouldFail: true}, + {Name: "invalid datetime type", FieldKind: client.FieldKind_DATETIME, FieldVal: 1, ShouldFail: true}, + + {Name: "valid int", FieldKind: client.FieldKind_INT, FieldVal: 12}, + {Name: "valid float", FieldKind: client.FieldKind_FLOAT, FieldVal: 36.654}, + {Name: "valid bool true", FieldKind: client.FieldKind_BOOL, FieldVal: true}, + {Name: "valid bool false", FieldKind: client.FieldKind_BOOL, FieldVal: false}, + {Name: "valid datetime string", FieldKind: client.FieldKind_DATETIME, FieldVal: nowStr}, + {Name: "valid empty string", FieldKind: client.FieldKind_STRING, FieldVal: ""}, + } + + for i, tc := range testCase { + desc := client.CollectionDescription{ + Name: "testTypeCol" + strconv.Itoa(i), + Schema: client.SchemaDescription{ + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + { + Name: "field", + Kind: tc.FieldKind, + Typ: client.LWW_REGISTER, + }, + }, + }, + } + + collection := f.createCollection(desc) + + indexDesc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + {Name: "field", Direction: client.Ascending}, + }, + } + + _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + require.NoError(f.t, err) + f.commitTxn() + + d := struct { + Field any `json:"field"` + }{Field: tc.FieldVal} + data, err := json.Marshal(d) + require.NoError(f.t, err) + doc, err := client.NewDocFromJSON(data) + require.NoError(f.t, err) + + err = collection.Create(f.ctx, doc) + f.commitTxn() + if tc.ShouldFail { + require.ErrorIs(f.t, err, + NewErrInvalidFieldValue(tc.FieldKind, tc.FieldVal), "test case: %s", tc.Name) + } else { + assertMsg := fmt.Sprintf("test case: %s", tc.Name) + require.NoError(f.t, err, assertMsg) + + keyBuilder := newIndexKeyBuilder(f).Col(collection.Name()).Field("field").Doc(doc) + key := keyBuilder.Build() + + keyStr := key.ToDS() + data, err := f.txn.Datastore().Get(f.ctx, keyStr) + require.NoError(t, err, assertMsg) + assert.Len(t, data, 0, assertMsg) + } + } +} + +func TestNonUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + docJSON, err := json.Marshal(struct { + Age int `json:"age"` + }{Age: 44}) + require.NoError(f.t, err) + + doc, err := client.NewDocFromJSON(docJSON) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). 
+ Values([]byte(nil)).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUniqueCreate_ShouldIndexExistingDocs(t *testing.T) { + f := newIndexTestFixture(t) + + doc1 := f.newUserDoc("John", 21) + f.saveDocToCollection(doc1, f.users) + doc2 := f.newUserDoc("Islam", 18) + f.saveDocToCollection(doc2, f.users) + + f.createUserCollectionIndexOnName() + + key1 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc1).Build() + key2 := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc2).Build() + + data, err := f.txn.Datastore().Get(f.ctx, key1.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) + data, err = f.txn.Datastore().Get(f.ctx, key2.ToDS()) + require.NoError(t, err) + assert.Len(t, data, 0) +} + +func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *testing.T) { + testError := errors.New("test error") + + cases := []struct { + Name string + PrepareFetcher func() fetcher.Fetcher + }{ + { + Name: "Fails to init", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to start", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Start(mock.Anything, mock.Anything).Unset() + f.EXPECT().Start(mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to fetch next decoded", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to close", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, nil) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(testError) + return f + }, + }, + } + + for _, tc := range cases { + f := newIndexTestFixture(t) + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + f.users.fetcherFactory = tc.PrepareFetcher + key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(t, err, testError, tc.Name) + + _, err = f.txn.Datastore().Get(f.ctx, key.ToDS()) + require.Error(t, err, tc.Name) + } +} + +func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + fieldKeyString := core.DataStoreKey{ + CollectionID: f.users.desc.IDString(), + }.WithDocKey(doc.Key().String()). + WithFieldId("1"). + WithValueFlag(). 
+ ToString() + + invalidKeyString := fieldKeyString + "/doesn't matter/" + + // Insert an invalid key within the document prefix, this will generate an error within the fetcher. + f.users.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) + + _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) + require.ErrorIs(f.t, err, core.ErrInvalidKey) +} + +func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { + f := newIndexTestFixtureBare(t) + users := f.createCollection(getUsersCollectionDesc()) + _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnAge()) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnWeight()) + require.NoError(f.t, err) + f.commitTxn() + + f.saveDocToCollection(f.newUserDoc("John", 21), users) + f.saveDocToCollection(f.newUserDoc("Islam", 23), users) + + products := f.createCollection(getProductsCollectionDesc()) + _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) + require.NoError(f.t, err) + f.commitTxn() + + f.saveDocToCollection(f.newProdDoc(1, 55, "games"), products) + + userNameKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Build() + userAgeKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Build() + userWeightKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersWeightFieldName).Build() + prodCatKey := newIndexKeyBuilder(f).Col(productsColName).Field(productsCategoryFieldName).Build() + + err = f.dropIndex(usersColName, testUsersColIndexAge) + require.NoError(f.t, err) + + assert.Len(t, f.getPrefixFromDataStore(userNameKey.ToString()), 2) + assert.Len(t, f.getPrefixFromDataStore(userAgeKey.ToString()), 0) + assert.Len(t, f.getPrefixFromDataStore(userWeightKey.ToString()), 2) + assert.Len(t, f.getPrefixFromDataStore(prodCatKey.ToString()), 1) +} + +func TestNonUniqueDrop_IfDataStorageFails_ReturnError(t *testing.T) { + testErr := errors.New("test error") + + testCases := []struct { + description string + prepareSystemStorage func(*mocks.DSReaderWriter_Expecter) + }{ + { + description: "Fails to query data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + mockedDS.Query(mock.Anything, mock.Anything).Unset() + mockedDS.Query(mock.Anything, mock.Anything).Return(nil, testErr) + }, + }, + { + description: "Fails to iterate data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + mockedDS.Query(mock.Anything, mock.Anything).Unset() + q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + }, + }, + { + description: "Fails to delete from data storage", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(nil) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + mockedDS.Delete(mock.Anything, mock.Anything).Unset() + mockedDS.Delete(mock.Anything, mock.Anything).Return(testErr) + }, + }, + { + description: "Fails to close data storage query iterator", + prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { + q := mocks.NewQueryResultsWithResults(t, 
query.Result{Entry: query.Entry{Key: ""}}) + q.EXPECT().Close().Unset() + q.EXPECT().Close().Return(testErr) + mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) + }, + }, + } + + for _, tc := range testCases { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(t) + tc.prepareSystemStorage(mockedTxn.MockDatastore.EXPECT()) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore) + + err := f.dropIndex(usersColName, testUsersColIndexName) + require.ErrorIs(t, err, testErr, tc.description) + } +} + +func TestNonUniqueDrop_ShouldCloseQueryIterator(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + mockedTxn := f.mockTxn() + + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() + queryResults := mocks.NewQueryResultsWithValues(f.t) + queryResults.EXPECT().Close().Unset() + queryResults.EXPECT().Close().Return(nil) + mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything). + Return(queryResults, nil) + + err := f.dropIndex(usersColName, testUsersColIndexName) + assert.NoError(t, err) +} + +func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + cases := []struct { + Name string + NewValue string + Exec func(doc *client.Document) error + }{ + { + Name: "update", + NewValue: "Islam", + Exec: func(doc *client.Document) error { + return f.users.Update(f.ctx, doc) + }, + }, + { + Name: "save", + NewValue: "Andy", + Exec: func(doc *client.Document) error { + return f.users.Save(f.ctx, doc) + }, + }, + } + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + for _, tc := range cases { + oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + err := doc.Set(usersNameFieldName, tc.NewValue) + require.NoError(t, err) + err = tc.Exec(doc) + require.NoError(t, err) + f.commitTxn() + + newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err) + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err) + } +} + +func TestNonUniqueUpdate_IfFailsToReadIndexDescription_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + + // retrieve the collection without index cached + usersCol, err := f.db.getCollectionByName(f.ctx, f.txn, usersColName) + require.NoError(t, err) + + testErr := errors.New("test error") + + mockedTxn := f.mockTxn() + mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) + mockedTxn.EXPECT().Systemstore().Unset() + mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) + mockedTxn.MockDatastore.EXPECT().Get(mock.Anything, mock.Anything).Unset() + mockedTxn.MockDatastore.EXPECT().Get(mock.Anything, mock.Anything).Return([]byte{}, nil) + + usersCol.(*collection).fetcherFactory = func() fetcher.Fetcher { + return fetcherMocks.NewStubbedFetcher(t) + } + err = 
usersCol.WithTxn(mockedTxn).Update(f.ctx, doc) + require.ErrorIs(t, err, testErr) +} + +func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) { + testError := errors.New("test error") + + cases := []struct { + Name string + PrepareFetcher func() fetcher.Fetcher + }{ + { + Name: "Fails to init", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to start", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Start(mock.Anything, mock.Anything).Unset() + f.EXPECT().Start(mock.Anything, mock.Anything).Return(testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to fetch next decoded", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + f.EXPECT().FetchNextDecoded(mock.Anything).Return(nil, fetcher.ExecInfo{}, testError) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(nil) + return f + }, + }, + { + Name: "Fails to close", + PrepareFetcher: func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().FetchNextDecoded(mock.Anything).Unset() + // By default the the stubbed fetcher returns an empty, invalid document + // here we need to make sure it reaches the Close call by overriding that default. + f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(nil, fetcher.ExecInfo{}, nil) + f.EXPECT().Close().Unset() + f.EXPECT().Close().Return(testError) + return f + }, + }, + } + + for _, tc := range cases { + t.Log(tc.Name) + + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + f.users.fetcherFactory = tc.PrepareFetcher + oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err, tc.Name) + err = f.users.Update(f.ctx, doc) + require.Error(t, err, tc.Name) + + newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.NoError(t, err, tc.Name) + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.Error(t, err, tc.Name) + } +} + +func TestNonUniqueUpdate_IfFailsToUpdateIndex_ReturnError(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnAge() + + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + f.commitTxn() + + validKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() + invalidKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc). 
+ Values([]byte("invalid")).Build() + + err := f.txn.Datastore().Delete(f.ctx, validKey.ToDS()) + require.NoError(f.t, err) + err = f.txn.Datastore().Put(f.ctx, invalidKey.ToDS(), []byte{}) + require.NoError(f.t, err) + f.commitTxn() + + err = doc.Set(usersAgeFieldName, 23) + require.NoError(t, err) + err = f.users.Update(f.ctx, doc) + require.Error(t, err) +} + +func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + f.createUserCollectionIndexOnAge() + + f.users.fetcherFactory = func() fetcher.Fetcher { + f := fetcherMocks.NewStubbedFetcher(t) + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() + f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + RunAndReturn(func( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + mapping *core.DocumentMapping, + reverse, showDeleted bool, + ) error { + require.Equal(t, 2, len(fields)) + require.ElementsMatch(t, + []string{usersNameFieldName, usersAgeFieldName}, + []string{fields[0].Name, fields[1].Name}) + return errors.New("early exit") + }) + return f + } + doc := f.newUserDoc("John", 21) + f.saveDocToCollection(doc, f.users) + + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + _ = f.users.Update(f.ctx, doc) +} + +func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { + testErr := errors.New("error") + + cases := []struct { + Name string + StubDataStore func(*mocks.DSReaderWriter_Expecter) + }{ + { + Name: "Delete old value", + StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { + ds.Delete(mock.Anything, mock.Anything).Return(testErr) + ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + }, + }, + { + Name: "Store new value", + StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { + ds.Delete(mock.Anything, mock.Anything).Maybe().Return(nil) + ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + ds.Put(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(testErr) + }, + }, + } + + for _, tc := range cases { + t.Log(tc.Name) + + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + doc := f.newUserDoc("John", 21) + err := doc.Set(usersNameFieldName, "Islam") + require.NoError(t, err) + + // This is only required as we are using it as a return value + // in production this value will have been set by the fetcher + doc.SchemaVersionID = f.users.Schema().VersionID + + f.users.fetcherFactory = func() fetcher.Fetcher { + df := fetcherMocks.NewStubbedFetcher(t) + df.EXPECT().FetchNextDecoded(mock.Anything).Unset() + df.EXPECT().FetchNextDecoded(mock.Anything).Return(doc, fetcher.ExecInfo{}, nil) + return df + } + + mockedTxn := f.mockTxn() + mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) + tc.StubDataStore(mockedTxn.MockDatastore.EXPECT()) + mockedTxn.EXPECT().Datastore().Unset() + mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() + + err = f.users.WithTxn(mockedTxn).Update(f.ctx, doc) + require.ErrorIs(t, err, testErr) + } +} + +func TestNonUpdate_IfIndexedFieldWasNil_ShouldDeleteIt(t *testing.T) { + f := newIndexTestFixture(t) + f.createUserCollectionIndexOnName() + + docJSON, err := json.Marshal(struct { + Age int `json:"age"` + }{Age: 44}) + require.NoError(f.t, 
err) + + doc, err := client.NewDocFromJSON(docJSON) + require.NoError(f.t, err) + + f.saveDocToCollection(doc, f.users) + + oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc). + Values([]byte(nil)).Build() + + err = doc.Set(usersNameFieldName, "John") + require.NoError(f.t, err) + + err = f.users.Update(f.ctx, doc) + require.NoError(f.t, err) + f.commitTxn() + + newKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() + + _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS()) + require.NoError(t, err) + _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS()) + require.Error(t, err) +} diff --git a/db/subscriptions.go b/db/subscriptions.go index 3243d0e779..af981ad95f 100644 --- a/db/subscriptions.go +++ b/db/subscriptions.go @@ -15,6 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/planner" ) @@ -58,25 +59,37 @@ func (db *db) handleSubscription( continue } - p := planner.New(ctx, db.WithTxn(txn), txn) + db.handleEvent(ctx, txn, pub, evt, r) - s := r.ToSelect(evt.DocKey, evt.Cid.String()) + txn.Discard(ctx) + } +} - result, err := p.RunSubscriptionRequest(ctx, s) - if err != nil { - pub.Publish(client.GQLResult{ - Errors: []error{err}, - }) - continue - } +func (db *db) handleEvent( + ctx context.Context, + txn datastore.Txn, + pub *events.Publisher[events.Update], + evt events.Update, + r *request.ObjectSubscription, +) { + p := planner.New(ctx, db.WithTxn(txn), txn) - // Don't send anything back to the client if the request yields an empty dataset. - if len(result) == 0 { - continue - } + s := r.ToSelect(evt.DocKey, evt.Cid.String()) + result, err := p.RunSubscriptionRequest(ctx, s) + if err != nil { pub.Publish(client.GQLResult{ - Data: result, + Errors: []error{err}, }) + return } + + // Don't send anything back to the client if the request yields an empty dataset. + if len(result) == 0 { + return + } + + pub.Publish(client.GQLResult{ + Data: result, + }) } diff --git a/db/txn_db.go b/db/txn_db.go index 71e204c356..a7096a46a7 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -186,6 +186,26 @@ func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collec return db.getAllCollections(ctx, db.txn) } +// GetAllIndexes gets all the indexes in the database. +func (db *implicitTxnDB) GetAllIndexes( + ctx context.Context, +) (map[client.CollectionName][]client.IndexDescription, error) { + txn, err := db.NewTxn(ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(ctx) + + return db.getAllIndexes(ctx, txn) +} + +// GetAllIndexes gets all the indexes in the database. +func (db *explicitTxnDB) GetAllIndexes( + ctx context.Context, +) (map[client.CollectionName][]client.IndexDescription, error) { + return db.getAllIndexes(ctx, db.txn) +} + // AddSchema takes the provided GQL schema in SDL format, and applies it to the database, // creating the necessary collections, request types, etc. 
// @@ -259,6 +279,25 @@ func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string) er return db.patchSchema(ctx, db.txn, patchString) } +func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.lensRegistry.SetMigration(ctx, txn, cfg) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { + return db.lensRegistry.SetMigration(ctx, db.txn, cfg) +} + // SetReplicator adds a new replicator to the database. func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { txn, err := db.NewTxn(ctx, false) @@ -334,3 +373,47 @@ func (db *implicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, er func (db *explicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { return db.getAllP2PCollections(ctx, db.txn) } + +// BasicImport imports a json dataset. +// filepath must be accessible to the node. +func (db *implicitTxnDB) BasicImport(ctx context.Context, filepath string) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.basicImport(ctx, txn, filepath) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +// BasicImport imports a json dataset. +// filepath must be accessible to the node. +func (db *explicitTxnDB) BasicImport(ctx context.Context, filepath string) error { + return db.basicImport(ctx, db.txn, filepath) +} + +// BasicExport exports the current data or subset of data to file in json format. +func (db *implicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + txn, err := db.NewTxn(ctx, true) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.basicExport(ctx, txn, config) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +// BasicExport exports the current data or subset of data to file in json format. +func (db *explicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error { + return db.basicExport(ctx, db.txn, config) +} diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md index 9a90455e3c..459f43075d 100644 --- a/docs/cli/defradb.md +++ b/docs/cli/defradb.md @@ -6,10 +6,7 @@ DefraDB Edge Database DefraDB is the edge database to power the user-centric future. -Start a database node, issue a request to a local or remote node, and much more. - -DefraDB is released under the BSL license, (c) 2022 Democratized Data Foundation. -See https://docs.source.network/BSL.txt for more information. +Start a DefraDB node, interact with a local or remote node, and much more. ### Options @@ -28,7 +25,7 @@ See https://docs.source.network/BSL.txt for more information. 
### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node * [defradb init](defradb_init.md) - Initialize DefraDB's root directory and configuration file * [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database * [defradb start](defradb_start.md) - Start a DefraDB node diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index 13e87bf46c..7173befb6b 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -1,11 +1,11 @@ ## defradb client -Interact with a running DefraDB node as a client +Interact with a DefraDB node ### Synopsis -Interact with a running DefraDB node as a client. -Execute queries, add schema types, and run debug routines. +Interact with a DefraDB node. +Execute queries, add schema types, obtain node info, etc. ### Options @@ -29,11 +29,13 @@ Execute queries, add schema types, and run debug routines. ### SEE ALSO * [defradb](defradb.md) - DefraDB Edge Database +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility * [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore -* [defradb client dump](defradb_client_dump.md) - Dump the contents of a database node-side -* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the DefraDB node -* [defradb client ping](defradb_client_ping.md) - Ping to test connection to a node +* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side +* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance +* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the node +* [defradb client ping](defradb_client_ping.md) - Ping to test connection with a node * [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md new file mode 100644 index 0000000000..baa08725e1 --- /dev/null +++ b/docs/cli/defradb_client_backup.md @@ -0,0 +1,34 @@ +## defradb client backup + +Interact with the backup utility + +### Synopsis + +Export to or Import from a backup file. +Currently only supports JSON format. + +### Options + +``` + -h, --help help for backup +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client backup export](defradb_client_backup_export.md) - Export the database to a file +* [defradb client backup import](defradb_client_backup_import.md) - Import a JSON data file to the database + diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md new file mode 100644 index 0000000000..ea8a22d634 --- /dev/null +++ b/docs/cli/defradb_client_backup_export.md @@ -0,0 +1,46 @@ +## defradb client backup export + +Export the database to a file + +### Synopsis + +Export the database to a file. If a file exists at the location, it will be overwritten. + +If the --collection flag is provided, only the data for that collection will be exported. +Otherwise, all collections in the database will be exported. + +If the --pretty flag is provided, the JSON will be pretty printed. + +Example: export data for the 'Users' collection: + defradb client export --collection Users user_data.json + +``` +defradb client backup export [-c --collections | -p --pretty | -f --format] [flags] +``` + +### Options + +``` + -c, --collections strings List of collections + -f, --format string Define the output format. Supported formats: [json] (default "json") + -h, --help help for export + -p, --pretty Set the output JSON to be pretty printed +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility + diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md new file mode 100644 index 0000000000..c539a4d77a --- /dev/null +++ b/docs/cli/defradb_client_backup_import.md @@ -0,0 +1,38 @@ +## defradb client backup import + +Import a JSON data file to the database + +### Synopsis + +Import a JSON data file to the database. + +Example: import data to the database: + defradb client import user_data.json + +``` +defradb client backup import [flags] +``` + +### Options + +``` + -h, --help help for import +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility + diff --git a/docs/cli/defradb_client_blocks.md b/docs/cli/defradb_client_blocks.md index 2824b677ac..e05a853440 100644 --- a/docs/cli/defradb_client_blocks.md +++ b/docs/cli/defradb_client_blocks.md @@ -23,6 +23,6 @@ Interact with the database's blockstore ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore. +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore diff --git a/docs/cli/defradb_client_blocks_get.md b/docs/cli/defradb_client_blocks_get.md index 3c007f2f30..38ff02b63c 100644 --- a/docs/cli/defradb_client_blocks_get.md +++ b/docs/cli/defradb_client_blocks_get.md @@ -1,6 +1,6 @@ ## defradb client blocks get -Get a block by its CID from the blockstore. +Get a block by its CID from the blockstore ``` defradb client blocks get [CID] [flags] diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index 1e4404e2fc..862154bc17 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -1,6 +1,6 @@ ## defradb client dump -Dump the contents of a database node-side +Dump the contents of DefraDB node-side ``` defradb client dump [flags] @@ -27,5 +27,5 @@ defradb client dump [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md new file mode 100644 index 0000000000..4babb57d46 --- /dev/null +++ b/docs/cli/defradb_client_index.md @@ -0,0 +1,34 @@ +## defradb client index + +Manage collections' indexes of a running DefraDB instance + +### Synopsis + +Manage (create, drop, or list) collection indexes on a DefraDB node. + +### Options + +``` + -h, --help help for index +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info")
+ --lognocolor Disable colored log output
+ --logoutput string Log output path (default "stderr")
+ --logtrace Include stacktrace in error and fatal logs
+ --rootdir string Directory for data and configuration to use (default: $HOME/.defradb)
+ --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client](defradb_client.md) - Interact with a DefraDB node
+* [defradb client index create](defradb_client_index_create.md) - Creates a secondary index on a collection's field(s)
+* [defradb client index drop](defradb_client_index_drop.md) - Drop a collection's secondary index
+* [defradb client index list](defradb_client_index_list.md) - Shows the list of indexes in the database or for a specific collection
+
diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md
new file mode 100644
index 0000000000..7f67e58075
--- /dev/null
+++ b/docs/cli/defradb_client_index_create.md
@@ -0,0 +1,46 @@
+## defradb client index create
+
+Creates a secondary index on a collection's field(s)
+
+### Synopsis
+
+Creates a secondary index on a collection's field(s).
+
+The --name flag is optional. If not provided, a name will be generated automatically.
+
+Example: create an index for 'Users' collection on 'name' field:
+  defradb client index create --collection Users --fields name
+
+Example: create a named index for 'Users' collection on 'name' field:
+  defradb client index create --collection Users --fields name --name UsersByName
+
+```
+defradb client index create -c --collection --fields [-n --name ] [flags]
+```
+
+### Options
+
+```
+ -c, --collection string Collection name
+ --fields string Fields to index
+ -h, --help help for create
+ -n, --name string Index name
+```
+
+### Options inherited from parent commands
+
+```
+ --logformat string Log format to use. Options are csv, json (default "csv")
+ --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,...
+ --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
+ --lognocolor Disable colored log output
+ --logoutput string Log output path (default "stderr")
+ --logtrace Include stacktrace in error and fatal logs
+ --rootdir string Directory for data and configuration to use (default: $HOME/.defradb)
+ --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
+
diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md
new file mode 100644
index 0000000000..f551fe4658
--- /dev/null
+++ b/docs/cli/defradb_client_index_drop.md
@@ -0,0 +1,40 @@
+## defradb client index drop
+
+Drop a collection's secondary index
+
+### Synopsis
+
+Drop a collection's secondary index.
+
+Example: drop the index 'UsersByName' for 'Users' collection:
+  defradb client index drop --collection Users --name UsersByName
+
+```
+defradb client index drop -c --collection -n --name [flags]
+```
+
+### Options
+
+```
+ -c, --collection string Collection name
+ -h, --help help for drop
+ -n, --name string Index name
+```
+
+### Options inherited from parent commands
+
+```
+ --logformat string Log format to use. Options are csv, json (default "csv")
+ --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,...
+ --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
+ --lognocolor Disable colored log output
+ --logoutput string Log output path (default "stderr")
+ --logtrace Include stacktrace in error and fatal logs
+ --rootdir string Directory for data and configuration to use (default: $HOME/.defradb)
+ --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181")
+```
+
+### SEE ALSO
+
+* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
+
diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md
new file mode 100644
index 0000000000..bf434d30f2
--- /dev/null
+++ b/docs/cli/defradb_client_index_list.md
@@ -0,0 +1,42 @@
+## defradb client index list
+
+Shows the list of indexes in the database or for a specific collection
+
+### Synopsis
+
+Shows the list of indexes in the database or for a specific collection.
+
+If the --collection flag is provided, only the indexes for that collection will be shown.
+Otherwise, all indexes in the database will be shown.
+
+Example: show all indexes for 'Users' collection:
+  defradb client index list --collection Users
+
+```
+defradb client index list [-c --collection ] [flags]
+```
+
+### Options
+
+```
+ -c, --collection string Collection name
+ -h, --help help for list
+```
+
+### Options inherited from parent commands
+
+```
+ --logformat string Log format to use. Options are csv, json (default "csv")
+ --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,...
+ --loglevel string Log level to use.
``` defradb client peerid [flags] @@ -27,5 +31,5 @@ defradb client peerid [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_ping.md b/docs/cli/defradb_client_ping.md index bcadf6a0a9..8edd7aff94 100644 --- a/docs/cli/defradb_client_ping.md +++ b/docs/cli/defradb_client_ping.md @@ -1,6 +1,6 @@ ## defradb client ping -Ping to test connection to a node +Ping to test connection with a node ``` defradb client ping [flags] @@ -27,5 +27,5 @@ defradb client ping [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 5370ebbdab..8f5c3477c3 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -46,5 +46,5 @@ defradb client query [query request] [flags] ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client +* [defradb client](defradb_client.md) - Interact with a DefraDB node diff --git a/docs/cli/defradb_client_rpc.md b/docs/cli/defradb_client_rpc.md index 3acf2972a0..d7046433c5 100644 --- a/docs/cli/defradb_client_rpc.md +++ b/docs/cli/defradb_client_rpc.md @@ -1,15 +1,15 @@ ## defradb client rpc -Interact with a DefraDB gRPC server +Interact with a DefraDB node via RPC ### Synopsis -Interact with a DefraDB gRPC server. +Interact with a DefraDB node via RPC. ### Options ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") -h, --help help for rpc ``` @@ -28,7 +28,7 @@ Interact with a DefraDB gRPC server. ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_addreplicator.md b/docs/cli/defradb_client_rpc_addreplicator.md index 3c10e403b1..e80b667f18 100644 --- a/docs/cli/defradb_client_rpc_addreplicator.md +++ b/docs/cli/defradb_client_rpc_addreplicator.md @@ -5,7 +5,7 @@ Add a new replicator ### Synopsis Use this command if you wish to add a new target replicator -for the p2p data sync system. +for the P2P data sync system. ``` defradb client rpc addreplicator [flags] diff --git a/docs/cli/defradb_client_rpc_p2pcollection.md b/docs/cli/defradb_client_rpc_p2pcollection.md index e6886c0078..ede32521d4 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection.md +++ b/docs/cli/defradb_client_rpc_p2pcollection.md @@ -1,10 +1,11 @@ ## defradb client rpc p2pcollection -Interact with the P2P collection system +Configure the P2P collection system ### Synopsis -Add, delete, or get the list of P2P collections +Add, delete, or get the list of P2P collections. +The selected collections synchronize their events on the pubsub network. 
### Options @@ -15,7 +16,7 @@ Add, delete, or get the list of P2P collections ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,8 +29,8 @@ Add, delete, or get the list of P2P collections ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC * [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections * [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections -* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Add P2P collections +* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Remove P2P collections diff --git a/docs/cli/defradb_client_rpc_p2pcollection_add.md b/docs/cli/defradb_client_rpc_p2pcollection_add.md index c0fb67c53b..92ac0d82e6 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_add.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_add.md @@ -4,7 +4,8 @@ Add P2P collections ### Synopsis -Use this command if you wish to add new P2P collections to the pubsub topics +Add P2P collections to the synchronized pubsub topics. +The collections are synchronized between nodes of a pubsub network. ``` defradb client rpc p2pcollection add [collectionID] [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/cli/defradb_client_rpc_p2pcollection_getall.md index c808c72f34..946a2e0156 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_getall.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_getall.md @@ -4,7 +4,8 @@ Get all P2P collections ### Synopsis -Use this command if you wish to get all P2P collections in the pubsub topics +Get all P2P collections in the pubsub topics. +This is the list of collections of the node that are synchronized on the pubsub network. ``` defradb client rpc p2pcollection getall [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection getall [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. 
Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection getall [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/cli/defradb_client_rpc_p2pcollection_remove.md index 985d21afc2..77658b4d50 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_remove.md +++ b/docs/cli/defradb_client_rpc_p2pcollection_remove.md @@ -1,10 +1,11 @@ ## defradb client rpc p2pcollection remove -Add P2P collections +Remove P2P collections ### Synopsis -Use this command if you wish to remove P2P collections from the pubsub topics +Remove P2P collections from the followed pubsub topics. +The removed collections will no longer be synchronized between nodes. ``` defradb client rpc p2pcollection remove [collectionID] [flags] @@ -19,7 +20,7 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Interact with the P2P collection system +* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_replicator.md b/docs/cli/defradb_client_rpc_replicator.md index 8d577b4c27..e88933791c 100644 --- a/docs/cli/defradb_client_rpc_replicator.md +++ b/docs/cli/defradb_client_rpc_replicator.md @@ -1,10 +1,11 @@ ## defradb client rpc replicator -Interact with the replicator system +Configure the replicator system ### Synopsis -Add, delete, or get the list of persisted replicators +Configure the replicator system. Add, delete, or get the list of persisted replicators. +A replicator replicates one or all collection(s) from one node to another. ### Options @@ -15,7 +16,7 @@ Add, delete, or get the list of persisted replicators ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,8 +29,8 @@ Add, delete, or get the list of persisted replicators ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server -* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator +* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC +* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator. 
It will stop synchronizing * [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators * [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator diff --git a/docs/cli/defradb_client_rpc_replicator_delete.md b/docs/cli/defradb_client_rpc_replicator_delete.md index cb7182f01b..c851d2f508 100644 --- a/docs/cli/defradb_client_rpc_replicator_delete.md +++ b/docs/cli/defradb_client_rpc_replicator_delete.md @@ -1,11 +1,10 @@ ## defradb client rpc replicator delete -Delete a replicator +Delete a replicator. It will stop synchronizing ### Synopsis -Use this command if you wish to remove the target replicator - for the p2p data sync system. +Delete a replicator. It will stop synchronizing. ``` defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] @@ -22,7 +21,7 @@ defradb client rpc replicator delete [-f, --full | -c, --collection] [fla ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -35,5 +34,5 @@ defradb client rpc replicator delete [-f, --full | -c, --collection] [fla ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_getall.md b/docs/cli/defradb_client_rpc_replicator_getall.md index 41f47d63fd..2449dba1fd 100644 --- a/docs/cli/defradb_client_rpc_replicator_getall.md +++ b/docs/cli/defradb_client_rpc_replicator_getall.md @@ -4,7 +4,8 @@ Get all replicators ### Synopsis -Use this command if you wish to get all the replicators for the p2p data sync system. +Get all the replicators active in the P2P data sync system. +These are the replicators that are currently replicating data from one node to another. ``` defradb client rpc replicator getall [flags] @@ -19,7 +20,7 @@ defradb client rpc replicator getall [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -32,5 +33,5 @@ defradb client rpc replicator getall [flags] ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_set.md b/docs/cli/defradb_client_rpc_replicator_set.md index 1f94a34467..24b7add648 100644 --- a/docs/cli/defradb_client_rpc_replicator_set.md +++ b/docs/cli/defradb_client_rpc_replicator_set.md @@ -4,8 +4,9 @@ Set a P2P replicator ### Synopsis -Use this command if you wish to add a new target replicator - for the p2p data sync system or add schemas to an existing one +Add a new target replicator. 
+A replicator replicates one or all collection(s) from this node to another. + ``` defradb client rpc replicator set [-f, --full | -c, --collection] [flags] @@ -22,7 +23,7 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] ### Options inherited from parent commands ``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") + --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -35,5 +36,5 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Interact with the replicator system +* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index b19ff013b9..c36c8d4bce 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -1,10 +1,10 @@ ## defradb client schema -Interact with the schema system of a running DefraDB instance +Interact with the schema system of a DefraDB node ### Synopsis -Make changes, updates, or look for existing schema types to a DefraDB node. +Make changes, updates, or look for existing schema types. ### Options @@ -27,7 +27,9 @@ Make changes, updates, or look for existing schema types to a DefraDB node. ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a running DefraDB node as a client -* [defradb client schema add](defradb_client_schema_add.md) - Add a new schema type to DefraDB +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client schema add](defradb_client_schema_add.md) - Add new schema +* [defradb client schema list](defradb_client_schema_list.md) - List schema types with their respective fields +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index 29c713bfbe..b278431034 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -1,10 +1,10 @@ ## defradb client schema add -Add a new schema type to DefraDB +Add new schema ### Synopsis -Add a new schema type to DefraDB. +Add new schema. Example: add from an argument string: defradb client schema add 'type Foo { ... }' @@ -43,5 +43,5 @@ defradb client schema add [schema] [flags] ### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_client_schema_list.md b/docs/cli/defradb_client_schema_list.md new file mode 100644 index 0000000000..ffbe253e31 --- /dev/null +++ b/docs/cli/defradb_client_schema_list.md @@ -0,0 +1,31 @@ +## defradb client schema list + +List schema types with their respective fields + +``` +defradb client schema list [flags] +``` + +### Options + +``` + -h, --help help for list +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. 
Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node + diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md new file mode 100644 index 0000000000..0a20968378 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration.md @@ -0,0 +1,33 @@ +## defradb client schema migration + +Interact with the schema migration system of a running DefraDB instance + +### Synopsis + +Make set or look for existing schema migrations on a DefraDB node. + +### Options + +``` + -h, --help help for migration +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client schema migration get](defradb_client_schema_migration_get.md) - Gets the schema migrations within DefraDB +* [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB + diff --git a/docs/cli/defradb_client_schema_migration_get.md b/docs/cli/defradb_client_schema_migration_get.md new file mode 100644 index 0000000000..d2164ed6bd --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_get.md @@ -0,0 +1,40 @@ +## defradb client schema migration get + +Gets the schema migrations within DefraDB + +### Synopsis + +Gets the schema migrations within the local DefraDB node. + +Example: + defradb client schema migration get' + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. + +``` +defradb client schema migration get [flags] +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md new file mode 100644 index 0000000000..8013fd2a29 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -0,0 +1,47 @@ +## defradb client schema migration set + +Set a schema migration within DefraDB + +### Synopsis + +Set a migration between two schema versions within the local DefraDB node. + +Example: set from an argument string: + defradb client schema migration set bae123 bae456 '{"lenses": [...' + +Example: set from file: + defradb client schema migration set bae123 bae456 -f schema_migration.lens + +Example: add from stdin: + cat schema_migration.lens | defradb client schema migration set bae123 bae456 - + +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network. + +``` +defradb client schema migration set [src] [dst] [cfg] [flags] +``` + +### Options + +``` + -f, --file string Lens configuration file + -h, --help help for set +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index a70ea29517..ec64d293e0 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -6,7 +6,7 @@ Patch an existing schema type Patch an existing schema. -Uses JSON PATCH formatting as a DDL. +Uses JSON Patch to modify schema types. Example: patch from an argument string: defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' @@ -45,5 +45,5 @@ defradb client schema patch [schema] [flags] ### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a running DefraDB instance +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node diff --git a/docs/cli/defradb_init.md b/docs/cli/defradb_init.md index 84a4bff742..f8d69f5794 100644 --- a/docs/cli/defradb_init.md +++ b/docs/cli/defradb_init.md @@ -5,6 +5,7 @@ Initialize DefraDB's root directory and configuration file ### Synopsis Initialize a directory for configuration and data at the given path. +Passed flags will be persisted in the stored configuration. 
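As a hedged sketch of the persistence behaviour described in the synopsis above: flags passed to `init` are written into the generated configuration file, so later runs can pick them up without repeating them. The flag values below are illustrative; `--rootdir` and `--url` are the global flags documented throughout these pages.

```
# Values are examples only; passed flags are persisted into the stored configuration.
defradb init --rootdir ~/.defradb --url localhost:9181
# A later `defradb start` then reads these values back from the stored config.
```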
``` defradb init [flags] diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index 014f011ad2..d23b1fcacb 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -4,7 +4,7 @@ Start a DefraDB node ### Synopsis -Start a new instance of DefraDB node. +Start a DefraDB node. ``` defradb start [flags] diff --git a/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md b/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md new file mode 100644 index 0000000000..c63c7ddf5e --- /dev/null +++ b/docs/data_format_changes/i1243-enable-previously-skipped-explain-tests.md @@ -0,0 +1,5 @@ +# Enable Refactored Explain Tests That Were Always Skipped + +Previously we had explain tests always being skipped, the integration of explain setup into the action based testing +setup enabled them, but since they were being skipped previously change detector keeps failing. This isn't a breaking +change. diff --git a/docs/data_format_changes/i1448-migration-engine.md b/docs/data_format_changes/i1448-migration-engine.md new file mode 100644 index 0000000000..aff5be3759 --- /dev/null +++ b/docs/data_format_changes/i1448-migration-engine.md @@ -0,0 +1,3 @@ +# Add lens migration engine to defra + +A new key-value was added to the datastore, it tracks the schema version of a datastore document and is required. If need be it could be set to the latest schema version for all documents, but that would prevent the migration of those records from their true version to that set version. \ No newline at end of file diff --git a/docs/data_format_changes/i1530-change-detector-without-non-mutations.md b/docs/data_format_changes/i1530-change-detector-without-non-mutations.md new file mode 100644 index 0000000000..602254206d --- /dev/null +++ b/docs/data_format_changes/i1530-change-detector-without-non-mutations.md @@ -0,0 +1,3 @@ +# Change detector without non-mutations actions + +The previous fix caused a regression in the change detector and we need a documentation to break the cycle. diff --git a/docs/data_format_changes/i1602-no-change-tests-updated.md b/docs/data_format_changes/i1602-no-change-tests-updated.md new file mode 100644 index 0000000000..765f2261a5 --- /dev/null +++ b/docs/data_format_changes/i1602-no-change-tests-updated.md @@ -0,0 +1,3 @@ +# Rework transaction test framework capabilities + +This is not a breaking change, a test was split, which caused the change detector test-case to change. \ No newline at end of file diff --git a/docs/data_format_changes/i1620-remove-first-crdt-byte.md b/docs/data_format_changes/i1620-remove-first-crdt-byte.md new file mode 100644 index 0000000000..7f605f9129 --- /dev/null +++ b/docs/data_format_changes/i1620-remove-first-crdt-byte.md @@ -0,0 +1,3 @@ +# Remove the first CRDT byte from field encoded values + +The first CRDT byte was legacy code and no longer necessary as we have this information independently available via the client.FieldDescription, since the FieldDescription.Typ is the exact same value. 
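As a companion to the migration-engine note above, here is a hedged sketch of what a configuration passed to `defradb client schema migration set` might look like. The `bae123`/`bae456` version IDs and the top-level `lenses` key come from the CLI docs earlier in this change; the module path and argument names inside the entry are hypothetical placeholders rather than a documented Lens schema.

```
# The module path and argument names below are illustrative placeholders only.
defradb client schema migration set bae123 bae456 '{
  "lenses": [
    { "path": "/path/to/my_migration.wasm", "arguments": { "from": "name", "to": "fullName" } }
  ]
}'
```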
\ No newline at end of file diff --git a/go.mod b/go.mod index a5919c79bd..d935137715 100644 --- a/go.mod +++ b/go.mod @@ -1,58 +1,60 @@ module github.com/sourcenetwork/defradb -go 1.19 +go 1.20 require ( + github.com/bits-and-blooms/bitset v1.8.0 github.com/bxcodec/faker v2.0.1+incompatible github.com/dgraph-io/badger/v3 v3.2103.5 github.com/evanphx/json-patch/v5 v5.6.0 github.com/fxamacker/cbor/v2 v2.4.0 - github.com/go-chi/chi/v5 v5.0.8 + github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.4.2 github.com/gofrs/uuid/v5 v5.0.0 - github.com/gogo/protobuf v1.3.2 github.com/graphql-go/graphql v0.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/iancoleman/strcase v0.2.0 - github.com/ipfs/boxo v0.8.1 + github.com/iancoleman/strcase v0.3.0 + github.com/ipfs/boxo v0.10.2 github.com/ipfs/go-block-format v0.1.2 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 - github.com/libp2p/go-libp2p v0.27.1 + github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 + github.com/libp2p/go-libp2p v0.28.0 github.com/libp2p/go-libp2p-gostream v0.6.0 - github.com/libp2p/go-libp2p-kad-dht v0.23.0 + github.com/libp2p/go-libp2p-kad-dht v0.24.2 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.9.0 + github.com/multiformats/go-multiaddr v0.10.1 github.com/multiformats/go-multibase v0.2.0 - github.com/multiformats/go-multihash v0.2.1 - github.com/multiformats/go-varint v0.0.7 + github.com/multiformats/go-multihash v0.2.3 github.com/pkg/errors v0.9.1 - github.com/sourcenetwork/immutable v0.2.2 + github.com/planetscale/vtprotobuf v0.4.0 + github.com/sourcenetwork/immutable v0.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.2 + github.com/spf13/viper v1.16.0 + github.com/stretchr/testify v1.8.4 github.com/textileio/go-libp2p-pubsub-rpc v0.0.9 github.com/tidwall/btree v1.6.0 github.com/ugorji/go/codec v1.2.11 github.com/valyala/fastjson v1.6.4 - go.opentelemetry.io/otel/metric v0.36.0 - go.opentelemetry.io/otel/sdk/metric v0.36.0 + go.opentelemetry.io/otel/metric v1.16.0 + go.opentelemetry.io/otel/sdk/metric v0.39.0 go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.9.0 - golang.org/x/net v0.10.0 - google.golang.org/grpc v1.55.0 + golang.org/x/crypto v0.11.0 + golang.org/x/net v0.12.0 + google.golang.org/grpc v1.56.2 + google.golang.org/protobuf v1.31.0 ) require ( - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -62,7 +64,7 @@ require ( github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -70,10 +72,11 @@ require ( github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // 
indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect @@ -81,15 +84,16 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hsanjuan/ipfs-lite v1.4.1 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitswap v0.12.0 // indirect @@ -103,7 +107,7 @@ require ( github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipld-cbor v0.0.6 // indirect - github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-ipld-legacy v0.2.1 // indirect github.com/ipfs/go-libipfs v0.7.0 // indirect github.com/ipfs/go-merkledag v0.9.0 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect @@ -113,8 +117,8 @@ require ( github.com/ipld/go-ipld-prime v0.20.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect @@ -122,78 +126,78 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-connmgr v0.4.0 // indirect github.com/libp2p/go-libp2p-core v0.20.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.4.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect + github.com/libp2p/go-reuseport v0.3.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - 
github.com/miekg/dns v1.1.53 // indirect + github.com/miekg/dns v1.1.54 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multicodec v0.8.1 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect - github.com/onsi/ginkgo/v2 v2.9.2 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.9.7 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-19 v0.3.2 // indirect github.com/quic-go/qtls-go1-20 v0.2.2 // indirect github.com/quic-go/quic-go v0.33.0 // indirect - github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect + github.com/tetratelabs/wazero v1.3.1 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/sdk v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/dig v1.16.1 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/sdk v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.19.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // 
indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/tools v0.9.1 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/protobuf v1.30.0 // indirect + gonum.org/v1/gonum v0.13.0 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - nhooyr.io/websocket v1.8.7 // indirect + lukechampine.com/blake3 v1.2.1 // indirect ) // SourceNetwork fork og graphql-go diff --git a/go.sum b/go.sum index 8b9d753290..2fdb78dca4 100644 --- a/go.sum +++ b/go.sum @@ -76,13 +76,16 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= +github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -156,9 +159,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 
h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -214,14 +217,10 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= -github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= +github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -238,28 +237,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql 
v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -352,8 +338,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= -github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -377,6 +363,7 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -422,11 +409,11 @@ github.com/hsanjuan/ipfs-lite v1.4.1 h1:l+mnqk6wm2GiVJWn4u0UBtX+YqqA5cfsjX1ZujPx github.com/hsanjuan/ipfs-lite v1.4.1/go.mod h1:+c/L+PWf0l7DhmQF3cO2O3GBRQT/pUZrl86VG//O9Hk= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= 
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -436,8 +423,8 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.8.1 h1:3DkKBCK+3rdEB5t77WDShUXXhktYwH99mkAsgajsKrU= -github.com/ipfs/boxo v0.8.1/go.mod h1:xJ2hVb4La5WyD7GvKYE0lq2g1rmQZoCD2K4WNrV6aZI= +github.com/ipfs/boxo v0.10.2 h1:kspw9HmMyKzLQxpKk417sF69i6iuf50AXtRjFqCYyL4= +github.com/ipfs/boxo v0.10.2/go.mod h1:1qgKq45mPRCxf4ZPoJV2lnXxyxucigILMJOrQrVivv8= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= @@ -539,10 +526,10 @@ github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4uk github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= -github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= -github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= +github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= +github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= github.com/ipfs/go-libipfs v0.7.0 h1:Mi54WJTODaOL2/ZSm5loi3SwI3jI2OuFWUrQIkJ5cpM= github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= @@ -582,7 +569,6 @@ 
github.com/ipfs/interface-go-ipfs-core v0.10.0 h1:b/psL1oqJcySdQAsIBfW5ZJJkOAsYl github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= @@ -609,9 +595,7 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -625,14 +609,12 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -649,8 
+631,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 h1:hC67vWtvuDnw8w6u4jLFoj3SOH92/4Lq8SCR++L7njw= +github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25/go.mod h1:rDE4oJUIAQoXX9heUg8VOQf5LscRWj0BeE5mbGqOs3E= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -678,8 +660,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.27.1 h1:k1u6RHsX3hqKnslDjsSgLNURxJ3O1atIZCY4gpMbbus= -github.com/libp2p/go-libp2p v0.27.1/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE= +github.com/libp2p/go-libp2p v0.28.0 h1:zO8cY98nJiPzZpFv5w5gqqb8aVzt4ukQ0nVOSaaKhJ8= +github.com/libp2p/go-libp2p v0.28.0/go.mod h1:s3Xabc9LSwOcnv9UD4nORnXKTsWkPMkIMB/JIGXVnzk= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= @@ -741,10 +723,10 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= -github.com/libp2p/go-libp2p-kad-dht v0.23.0/go.mod h1:oO5N308VT2msnQI6qi5M61wzPmJYg7Tr9e16m5n7uDU= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= +github.com/libp2p/go-libp2p-kad-dht v0.24.2 h1:zd7myKBKCmtZBhI3I0zm8xBkb28v3gmSEtQfBdAdFwc= +github.com/libp2p/go-libp2p-kad-dht v0.24.2/go.mod h1:BShPzRbK6+fN3hk8a0WGAYKpb8m4k+DtchkqouGTrSg= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= @@ -787,8 +769,8 @@ 
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0 h1:b7y4aixQ7AwbqYfcOQ6wTw8DQvuRZeTAA0Od3YYN5yc= -github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= +github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= @@ -850,8 +832,8 @@ github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbx github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -865,8 +847,8 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= +github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= @@ -925,11 +907,10 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -940,8 +921,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= -github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -954,8 +935,9 @@ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -968,11 +950,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -996,8 +976,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= -github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= +github.com/multiformats/go-multiaddr v0.10.1 h1:HghtFrWyZEPrpTvgAMFJi6gFdgHfs2cb0pyfDsk+lqU= +github.com/multiformats/go-multiaddr v0.10.1/go.mod h1:jLEZsA61rwWNZQTHHnqq2HNa+4os/Hz54eqiRnsRqYQ= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1018,8 +998,8 @@ github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= -github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1027,8 +1007,8 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.0.1/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= @@ -1064,15 +1044,15 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= -github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1093,8 +1073,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1104,11 +1084,12 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY= +github.com/planetscale/vtprotobuf v0.4.0/go.mod 
h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= @@ -1128,8 +1109,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1155,8 +1136,8 @@ github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8G github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= -github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= -github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1216,19 +1197,19 @@ github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc/go.mod h1:3rOV6TxePSwADKpnwXBKpTjAA4QyjZBus13xc6VCtSw= github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23 h1:QcSWSYlE1alUC0uOO/trppYMLpR8OuFIL8IqR+PR5sA= github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23/go.mod h1:3Ty9EMes+aoxl8xS0CsuCGQZ4JEsOlC5yqQDLOKoBRw= -github.com/sourcenetwork/immutable v0.2.2 
h1:Qjz1cCWhgjS6YkUTWb53R22wSYMEZhzBghhEzWaFi8c= -github.com/sourcenetwork/immutable v0.2.2/go.mod h1:4jpxObkIQw8pvlIRm4ndZqf3pH9ZjYEw/UYI6GZDJww= +github.com/sourcenetwork/immutable v0.3.0 h1:gHPtGvLrTBTK5YpDAhMU+u+S8v1F6iYmc3nbZLryMdc= +github.com/sourcenetwork/immutable v0.3.0/go.mod h1:GD7ceuh/HD7z6cdIwzKK2ctzgZ1qqYFJpsFp+8qYnbI= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= @@ -1241,8 +1222,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1261,13 +1242,16 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tetratelabs/wazero v1.3.1 h1:rnb9FgOEQRLLR8tgoD1mfjNjMhFeWRUk+a4b4j/GpUM= +github.com/tetratelabs/wazero v1.3.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0HChhXvvUSJuxBEKS+c= github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI= github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgiv9Yy9hddNM= @@ -1275,9 +1259,7 @@ github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsc github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1333,25 +1315,25 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q= -go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk= -go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= -go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= -go.opentelemetry.io/otel/sdk/metric v0.36.0 h1:dEXpkkOAEcHiRiaZdvd63MouV+3bCtAB/bF3jlNKnr8= -go.opentelemetry.io/otel/sdk/metric v0.36.0/go.mod h1:Lv4HQQPSCSkhyBKzLNtE8YhTSdK4HCwNh3lh7CiR20s= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= 
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= +go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= -go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1404,9 +1386,9 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1417,8 +1399,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1444,8 +1426,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1500,8 +1482,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1525,8 +1508,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1608,12 +1591,12 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1623,8 +1606,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1691,16 +1675,16 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= +gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1775,8 +1759,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1801,8 +1785,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= 
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1815,8 +1799,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1856,10 +1840,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/lens/fetcher.go b/lens/fetcher.go new file mode 100644 index 0000000000..bfd8fca3bc --- /dev/null +++ b/lens/fetcher.go @@ -0,0 +1,426 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package lens + +import ( + "context" + "reflect" + + "github.com/fxamacker/cbor/v2" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/fetcher" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// todo: The code in here can be significantly simplified with: +// https://github.com/sourcenetwork/defradb/issues/1589 + +type lensedFetcher struct { + source fetcher.Fetcher + registry client.LensRegistry + lens Lens + + txn datastore.Txn + + col *client.CollectionDescription + // Cache the fieldDescriptions mapped by name to allow for cheaper access within the fetcher loop + fieldDescriptionsByName map[string]client.FieldDescription + + targetVersionID string + + // If true there are migrations registered for the collection being fetched. + hasMigrations bool +} + +var _ fetcher.Fetcher = (*lensedFetcher)(nil) + +// NewFetcher returns a new fetcher that will migrate any documents from the given +// source Fetcher as they are are yielded. +func NewFetcher(source fetcher.Fetcher, registry client.LensRegistry) fetcher.Fetcher { + return &lensedFetcher{ + source: source, + registry: registry, + } +} + +func (f *lensedFetcher) Init( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docmapper *core.DocumentMapping, + reverse bool, + showDeleted bool, +) error { + f.col = col + + f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema.Fields)) + // Add cache the field descriptions in reverse, allowing smaller-index fields to overwrite any later + // ones. This should never really happen here, but it ensures the result is consistent with col.GetField + // which returns the first one it finds with a matching name. + for i := len(col.Schema.Fields) - 1; i >= 0; i-- { + field := col.Schema.Fields[i] + f.fieldDescriptionsByName[field.Name] = field + } + + history, err := getTargetedSchemaHistory(ctx, txn, f.registry.Config(), f.col.Schema.SchemaID, f.col.Schema.VersionID) + if err != nil { + return err + } + f.lens = new(f.registry, f.col.Schema.VersionID, history) + f.txn = txn + + for schemaVersionID := range history { + if f.registry.HasMigration(schemaVersionID) { + f.hasMigrations = true + break + } + } + + f.targetVersionID = col.Schema.VersionID + + var innerFetcherFields []client.FieldDescription + if f.hasMigrations { + // If there are migrations present, they may require fields that are not otherwise + // requested. At the moment this means we need to pass in nil so that the underlying + // fetcher fetches everything. + innerFetcherFields = nil + } else { + innerFetcherFields = fields + } + return f.source.Init(ctx, txn, col, innerFetcherFields, filter, docmapper, reverse, showDeleted) +} + +func (f *lensedFetcher) Start(ctx context.Context, spans core.Spans) error { + return f.source.Start(ctx, spans) +} + +func (f *lensedFetcher) FetchNext(ctx context.Context) (fetcher.EncodedDocument, fetcher.ExecInfo, error) { + panic("This function is never called and is dead code. 
As this type is internal, panicing is okay for now") +} + +func (f *lensedFetcher) FetchNextDecoded( + ctx context.Context, +) (*client.Document, fetcher.ExecInfo, error) { + doc, execInfo, err := f.source.FetchNextDecoded(ctx) + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + if doc == nil { + return nil, execInfo, nil + } + + if !f.hasMigrations || doc.SchemaVersionID == f.targetVersionID { + // If there are no migrations registered for this schema, or if the document is already + // at the target schema version, no migration is required and we can return it early. + return doc, execInfo, nil + } + + sourceLensDoc, err := clientDocToLensDoc(doc) + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + err = f.lens.Put(doc.SchemaVersionID, sourceLensDoc) + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + hasNext, err := f.lens.Next() + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + if !hasNext { + // The migration decided to not yield a document, so we cycle through the next fetcher doc + doc, nextExecInfo, err := f.FetchNextDecoded(ctx) + execInfo.Add(nextExecInfo) + return doc, execInfo, err + } + + migratedLensDoc, err := f.lens.Value() + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + migratedDoc, err := f.lensDocToClientDoc(migratedLensDoc) + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + err = f.updateDataStore(ctx, sourceLensDoc, migratedLensDoc) + if err != nil { + return nil, fetcher.ExecInfo{}, err + } + + return migratedDoc, execInfo, nil +} + +func (f *lensedFetcher) FetchNextDoc( + ctx context.Context, + mapping *core.DocumentMapping, +) ([]byte, core.Doc, fetcher.ExecInfo, error) { + key, doc, execInfo, err := f.source.FetchNextDoc(ctx, mapping) + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + + if len(doc.Fields) == 0 { + return key, doc, execInfo, nil + } + + if doc.SchemaVersionID == f.targetVersionID { + // If the document is already at the target schema version, no migration is required and + // we can return it early. + return key, doc, execInfo, nil + } + + sourceLensDoc, err := coreDocToLensDoc(mapping, doc) + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + err = f.lens.Put(doc.SchemaVersionID, sourceLensDoc) + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + + hasNext, err := f.lens.Next() + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + if !hasNext { + // The migration decided to not yield a document, so we cycle through the next fetcher doc + key, doc, nextExecInfo, err := f.FetchNextDoc(ctx, mapping) + execInfo.Add(nextExecInfo) + return key, doc, execInfo, err + } + + migratedLensDoc, err := f.lens.Value() + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + + migratedDoc, err := f.lensDocToCoreDoc(mapping, migratedLensDoc) + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + + err = f.updateDataStore(ctx, sourceLensDoc, migratedLensDoc) + if err != nil { + return nil, core.Doc{}, fetcher.ExecInfo{}, err + } + + return key, migratedDoc, execInfo, nil +} + +func (f *lensedFetcher) Close() error { + if f.lens != nil { + f.lens.Reset() + } + return f.source.Close() +} + +// clientDocToLensDoc converts a client.Document to a LensDoc. 
+func clientDocToLensDoc(doc *client.Document) (LensDoc, error) {
+	docAsMap := map[string]any{}
+
+	for field, fieldValue := range doc.Values() {
+		docAsMap[field.Name()] = fieldValue.Value()
+	}
+	docAsMap[request.KeyFieldName] = doc.Key().String()
+
+	// Note: client.Document does not have a means of flagging whether it is
+	// deleted or not, and currently the fetcher does not ever return deleted items
+	// from the function that returns this type.
+
+	return docAsMap, nil
+}
+
+// coreDocToLensDoc converts a core.Doc to a LensDoc.
+func coreDocToLensDoc(mapping *core.DocumentMapping, doc core.Doc) (LensDoc, error) {
+	docAsMap := map[string]any{}
+
+	for fieldIndex, fieldValue := range doc.Fields {
+		fieldName, ok := mapping.TryToFindNameFromIndex(fieldIndex)
+		if !ok {
+			continue
+		}
+		docAsMap[fieldName] = fieldValue
+	}
+
+	docAsMap[request.DeletedFieldName] = doc.Status.IsDeleted()
+
+	return docAsMap, nil
+}
+
+// lensDocToCoreDoc converts a LensDoc to a core.Doc.
+func (f *lensedFetcher) lensDocToCoreDoc(mapping *core.DocumentMapping, docAsMap LensDoc) (core.Doc, error) {
+	doc := mapping.NewDoc()
+
+	for fieldName, fieldByteValue := range docAsMap {
+		if fieldName == request.KeyFieldName {
+			key, ok := fieldByteValue.(string)
+			if !ok {
+				return core.Doc{}, core.ErrInvalidKey
+			}
+
+			doc.SetKey(key)
+			continue
+		}
+
+		fieldDesc, fieldFound := f.fieldDescriptionsByName[fieldName]
+		if !fieldFound {
+			// Note: This can technically happen if a Lens migration returns a field that
+			// we do not know about, in which case we have to skip it.
+			continue
+		}
+
+		fieldValue, err := core.DecodeFieldValue(fieldDesc, fieldByteValue)
+		if err != nil {
+			return core.Doc{}, err
+		}
+
+		index := mapping.FirstIndexOfName(fieldName)
+		doc.Fields[index] = fieldValue
+	}
+
+	if value, ok := docAsMap[request.DeletedFieldName]; ok {
+		if wasDeleted, ok := value.(bool); ok {
+			if wasDeleted {
+				doc.Status = client.Deleted
+			} else {
+				doc.Status = client.Active
+			}
+		}
+	}
+
+	doc.SchemaVersionID = f.col.Schema.VersionID
+
+	return doc, nil
+}
+
+// lensDocToClientDoc converts a LensDoc to a client.Document.
+func (f *lensedFetcher) lensDocToClientDoc(docAsMap LensDoc) (*client.Document, error) {
+	key, err := client.NewDocKeyFromString(docAsMap[request.KeyFieldName].(string))
+	if err != nil {
+		return nil, err
+	}
+	doc := client.NewDocWithKey(key)
+
+	for fieldName, fieldByteValue := range docAsMap {
+		if fieldName == request.KeyFieldName {
+			continue
+		}
+
+		fieldDesc, fieldFound := f.fieldDescriptionsByName[fieldName]
+		if !fieldFound {
+			// Note: This can technically happen if a Lens migration returns a field that
+			// we do not know about, in which case we have to skip it.
+			continue
+		}
+
+		fieldValue, err := core.DecodeFieldValue(fieldDesc, fieldByteValue)
+		if err != nil {
+			return nil, err
+		}
+
+		err = doc.SetAs(fieldDesc.Name, fieldValue, fieldDesc.Typ)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	doc.SchemaVersionID = f.col.Schema.VersionID
+
+	// Note: client.Document does not have a means of flagging whether it is
+	// deleted or not, and currently the fetcher does not ever return deleted items
+	// from the function that returns this type.
+
+	return doc, nil
+}
+
+// updateDataStore updates the datastore with the migrated values.
+//
+// This removes the need to migrate a document every time it is fetched: the second time around,
+// the underlying fetcher will return the migrated values cached in the datastore instead of the
+// underlying dag store values.
+func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string]any, migrated map[string]any) error { + modifiedFieldValuesByName := map[string]any{} + for name, originalValue := range original { + migratedValue, ok := migrated[name] + if !ok { + // If the field is present in the original, and missing from the migrated, it + // means that a migration has removed it, and we should set it to nil. + modifiedFieldValuesByName[name] = nil + continue + } + + // Note: A deep equals check is required here, as the values may be inline-array slices + // Todo: `reflect.DeepEqual` is pretty rubish long-term here and should be replaced + // with something more defra specific: https://github.com/sourcenetwork/defradb/issues/1606 + if !reflect.DeepEqual(originalValue, migratedValue) { + modifiedFieldValuesByName[name] = migratedValue + } + } + + for name, migratedValue := range migrated { + if _, ok := original[name]; !ok { + // If a field has been added by a migration we need to make sure we + // preserve it here. + modifiedFieldValuesByName[name] = migratedValue + continue + } + } + + dockey, ok := original[request.KeyFieldName].(string) + if !ok { + return core.ErrInvalidKey + } + + datastoreKeyBase := core.DataStoreKey{ + CollectionID: f.col.IDString(), + DocKey: dockey, + InstanceType: core.ValueKey, + } + + for fieldName, value := range modifiedFieldValuesByName { + fieldDesc, ok := f.fieldDescriptionsByName[fieldName] + if !ok { + // It may be that the migration has set fields that are unknown to us locally + // in which case we have to skip them for now. + continue + } + fieldKey := datastoreKeyBase.WithFieldId(fieldDesc.ID.String()) + + bytes, err := cbor.Marshal(value) + if err != nil { + return err + } + + err = f.txn.Datastore().Put(ctx, fieldKey.ToDS(), bytes) + if err != nil { + return err + } + } + + versionKey := datastoreKeyBase.WithFieldId(core.DATASTORE_DOC_VERSION_FIELD_ID) + err := f.txn.Datastore().Put(ctx, versionKey.ToDS(), []byte(f.targetVersionID)) + if err != nil { + return err + } + + return nil +} diff --git a/lens/history.go b/lens/history.go new file mode 100644 index 0000000000..0b2a914d94 --- /dev/null +++ b/lens/history.go @@ -0,0 +1,279 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package lens + +import ( + "context" + + "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" +) + +// schemaHistoryLink represents an item in a particular schema's history, it +// links to the previous and next version items if they exist. +type schemaHistoryLink struct { + // The schema version id of this history item. + schemaVersionID string + + // The history link to the next schema version, if there is one + // (for the most recent schema version this will be None). + next immutable.Option[*schemaHistoryLink] + + // The history link to the previous schema version, if there is + // one (for the initial schema version this will be None). 
+ previous immutable.Option[*schemaHistoryLink] +} + +// targetedSchemaHistoryLink represents an item in a particular schema's history, it +// links to the previous and next version items if they exist. +// +// It also contains a vector which describes the distance and direction to the +// target schema version (given as an input param on construction). +type targetedSchemaHistoryLink struct { + // The schema version id of this history item. + schemaVersionID string + + // The link to next schema version, if there is one + // (for the most recent schema version this will be None). + next immutable.Option[*targetedSchemaHistoryLink] + + // The link to the previous schema version, if there is + // one (for the initial schema version this will be None). + previous immutable.Option[*targetedSchemaHistoryLink] + + // The distance and direction from this history item to the target. + // + // A zero value indicates that this is the target item. A positive value + // indicates that the target is more recent. A negative value indicates + // that the target predates this history item. + targetVector int +} + +// getTargetedSchemaHistory returns the history of the schema of the given id, relative +// to the given target schema version id. +// +// This includes any history items that are only known via registered +// schema migrations. +func getTargetedSchemaHistory( + ctx context.Context, + txn datastore.Txn, + lensConfigs []client.LensConfig, + schemaID string, + targetSchemaVersionID string, +) (map[schemaVersionID]*targetedSchemaHistoryLink, error) { + history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaID) + if err != nil { + return nil, err + } + + result := map[schemaVersionID]*targetedSchemaHistoryLink{} + + for _, item := range history { + result[item.schemaVersionID] = &targetedSchemaHistoryLink{ + schemaVersionID: item.schemaVersionID, + } + } + + for _, item := range result { + schemaHistoryLink := history[item.schemaVersionID] + nextHistoryItem := schemaHistoryLink.next + if !nextHistoryItem.HasValue() { + continue + } + nextItem := result[nextHistoryItem.Value().schemaVersionID] + item.next = immutable.Some(nextItem) + nextItem.previous = immutable.Some(item) + } + + orphanSchemaVersions := map[string]struct{}{} + + for schemaVersion, item := range result { + if item.schemaVersionID == targetSchemaVersionID { + continue + } + if item.targetVector != 0 { + continue + } + + distanceTravelled := 0 + currentItem := item + wasFound := false + for { + if !currentItem.next.HasValue() { + break + } + + currentItem = currentItem.next.Value() + distanceTravelled++ + if currentItem.targetVector != 0 { + distanceTravelled += currentItem.targetVector + wasFound = true + break + } + if currentItem.schemaVersionID == targetSchemaVersionID { + wasFound = true + break + } + } + + if !wasFound { + // The target was not found going up the chain, try looking back. + // This is important for downgrading schema versions. + for { + if !currentItem.previous.HasValue() { + break + } + + currentItem = currentItem.previous.Value() + distanceTravelled-- + if currentItem.targetVector != 0 { + distanceTravelled += currentItem.targetVector + wasFound = true + break + } + if currentItem.schemaVersionID == targetSchemaVersionID { + wasFound = true + break + } + } + } + + if !wasFound { + // This may happen if users define schema migrations to unknown schema versions + // with no migration path to known schema versions, esentially creating orphan + // migrations. 
These may become linked later and should remain persisted in the + // database, but we can drop them from the history here/now. + orphanSchemaVersions[schemaVersion] = struct{}{} + continue + } + + item.targetVector = distanceTravelled + } + + for schemaVersion := range orphanSchemaVersions { + delete(result, schemaVersion) + } + + return result, nil +} + +type schemaHistoryPairing struct { + schemaVersionID string + nextSchemaVersionID string +} + +// getSchemaHistory returns the history of the schema of the given id as linked list +// with each item mapped by schema version id. +// +// This includes any history items that are only known via registered +// schema migrations. +func getSchemaHistory( + ctx context.Context, + txn datastore.Txn, + lensConfigs []client.LensConfig, + schemaID string, +) (map[schemaVersionID]*schemaHistoryLink, error) { + pairings := map[string]*schemaHistoryPairing{} + + for _, config := range lensConfigs { + pairings[config.SourceSchemaVersionID] = &schemaHistoryPairing{ + schemaVersionID: config.SourceSchemaVersionID, + nextSchemaVersionID: config.DestinationSchemaVersionID, + } + + if _, ok := pairings[config.DestinationSchemaVersionID]; !ok { + pairings[config.DestinationSchemaVersionID] = &schemaHistoryPairing{ + schemaVersionID: config.DestinationSchemaVersionID, + } + } + } + + prefix := core.NewSchemaHistoryKey(schemaID, "") + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: prefix.ToString(), + }) + if err != nil { + return nil, err + } + + for res := range q.Next() { + // check for Done on context first + select { + case <-ctx.Done(): + // we've been cancelled! ;) + return nil, q.Close() + default: + // noop, just continue on the with the for loop + } + + if res.Error != nil { + err = q.Close() + if err != nil { + return nil, err + } + return nil, res.Error + } + + key, err := core.NewSchemaHistoryKeyFromString(res.Key) + if err != nil { + err = q.Close() + if err != nil { + return nil, err + } + return nil, err + } + + // The local schema version history takes priority over and migration-defined history + // and overwrites whatever already exists in the pairings (if any) + pairings[key.PreviousSchemaVersionID] = &schemaHistoryPairing{ + schemaVersionID: key.PreviousSchemaVersionID, + nextSchemaVersionID: string(res.Value), + } + + if _, ok := pairings[string(res.Value)]; !ok { + pairings[string(res.Value)] = &schemaHistoryPairing{ + schemaVersionID: string(res.Value), + } + } + } + + err = q.Close() + if err != nil { + return nil, err + } + + history := map[schemaVersionID]*schemaHistoryLink{} + + for _, pairing := range pairings { + // Convert the temporary types to the cleaner return type: + history[pairing.schemaVersionID] = &schemaHistoryLink{ + schemaVersionID: pairing.schemaVersionID, + } + } + + for _, pairing := range pairings { + src := history[pairing.schemaVersionID] + + // Use the internal pairings to set the next/previous links. This must be + // done after the `history` map has been fully populated, else `src` and + // `next` may not yet have been added to the map. 
+ if next, hasNext := history[pairing.nextSchemaVersionID]; hasNext { + src.next = immutable.Some(next) + next.previous = immutable.Some(src) + } + } + + return history, nil +} diff --git a/lens/lens.go b/lens/lens.go new file mode 100644 index 0000000000..50549542b8 --- /dev/null +++ b/lens/lens.go @@ -0,0 +1,206 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package lens + +import ( + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/client" +) + +type schemaVersionID = string + +// LensDoc represents a document that will be sent to/from a Lens. +type LensDoc = map[string]any + +type lensInput struct { + SchemaVersionID schemaVersionID + Doc LensDoc +} + +// Lens migrate items fed in to the target schema version. +// +// Source documents may be of various schema versions, and may need to be migrated across multiple +// versions. As the input versions are unknown until enumerated, the migration pipeline is constructed +// lazily, as new source schema versions are discovered. If a migration does not exist for a schema +// version, the document will be passed on to the next stage. +type Lens interface { + enumerable.Enumerable[LensDoc] + + // Put feeds the given document into the Lens, so that its transformed output may be yielded. + Put(schemaVersionID schemaVersionID, value LensDoc) error +} + +type lens struct { + lensRegistry client.LensRegistry + + // The primary access points to the lens pipes through which all things flow. + lensPipesBySchemaVersionIDs map[schemaVersionID]enumerable.Concatenation[LensDoc] + + // The input pipes, into which items are added to the pipe system. + lensInputPipesBySchemaVersionIDs map[schemaVersionID]enumerable.Queue[LensDoc] + + // The output pipe, through which all outputs must exit. + outputPipe enumerable.Concatenation[LensDoc] + + schemaVersionHistory map[schemaVersionID]*targetedSchemaHistoryLink + + source enumerable.Queue[lensInput] +} + +var _ Lens = (*lens)(nil) + +func new( + lensRegistry client.LensRegistry, + targetSchemaVersionID schemaVersionID, + schemaVersionHistory map[schemaVersionID]*targetedSchemaHistoryLink, +) Lens { + targetSource := enumerable.NewQueue[LensDoc]() + outputPipe := enumerable.Concat[LensDoc](targetSource) + + return &lens{ + lensRegistry: lensRegistry, + source: enumerable.NewQueue[lensInput](), + outputPipe: outputPipe, + schemaVersionHistory: schemaVersionHistory, + lensInputPipesBySchemaVersionIDs: map[schemaVersionID]enumerable.Queue[LensDoc]{ + targetSchemaVersionID: targetSource, + }, + lensPipesBySchemaVersionIDs: map[schemaVersionID]enumerable.Concatenation[LensDoc]{ + targetSchemaVersionID: outputPipe, + }, + } +} + +// todo - instead of this and a lens-fetcher, we could instead make lens-fetcher (and other fetchers) enumerables +// instead and use those as the `source` directly. +// https://github.com/sourcenetwork/defradb/issues/1589 +func (l *lens) Put(schemaVersionID schemaVersionID, value LensDoc) error { + return l.source.Put(lensInput{ + SchemaVersionID: schemaVersionID, + Doc: value, + }) +} + +// Next reads documents from source, and migrates them to the target schema version. 
+// +// Source documents may be of various schema versions, and may need to be migrated across multiple +// versions. As the input versions are unknown until enumerated, the migration pipeline is constructed +// lazily, as new source schema versions are discovered. If a migration does not exist for a schema +// version, the document will be passed on to the next stage. +// +// Perhaps the best way to visualize this is as a multi-input marble-run, where inputs and their paths +// are constructed as new marble types are discovered. +// +// - Each version can have one or none migrations. +// - Each migration in the document's path to the target version is guaranteed to recieve the document +// exactly once. +// - Schema history is assumed to be a single straight line with no branching, this will be fixed with +// https://github.com/sourcenetwork/defradb/issues/1598 +func (l *lens) Next() (bool, error) { + // Check the output pipe first, there could be items remaining within waiting to be yielded. + hasValue, err := l.outputPipe.Next() + if err != nil || hasValue { + return hasValue, err + } + + hasValue, err = l.source.Next() + if err != nil || !hasValue { + return false, err + } + + doc, err := l.source.Value() + if err != nil { + return false, err + } + + var inputPipe enumerable.Queue[LensDoc] + if p, ok := l.lensInputPipesBySchemaVersionIDs[doc.SchemaVersionID]; ok { + // If the input pipe exists we can safely assume that it has been correctly connected + // up to the output via any intermediary pipes. + inputPipe = p + } else { + historyLocation := l.schemaVersionHistory[doc.SchemaVersionID] + var pipeHead enumerable.Enumerable[LensDoc] + + for { + junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] + if !junctionPreviouslyExisted { + versionInputPipe := enumerable.NewQueue[LensDoc]() + l.lensInputPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = versionInputPipe + if inputPipe == nil { + // The input pipe will be fed documents which are currently at this schema version + inputPipe = versionInputPipe + } + // It is a source of the schemaVersion junction pipe, other schema versions + // may also join as sources to this junction pipe + junctionPipe = enumerable.Concat[LensDoc](versionInputPipe) + l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = junctionPipe + } + + // If we have previously laid pipe, we need to connect it to the current junction. + // This links a lens migration to the next stage. + if pipeHead != nil { + junctionPipe.Append(pipeHead) + } + + if junctionPreviouslyExisted { + // If the junction pipe previously existed, then we can assume it is already connected to outputPipe + // via any intermediary pipes. + break + } + + if historyLocation.targetVector > 0 { + // Aquire a lens migration from the registery, using the junctionPipe as its source. + // The new pipeHead will then be connected as a source to the next migration-stage on + // the next loop. + pipeHead, err = l.lensRegistry.MigrateUp(junctionPipe, historyLocation.schemaVersionID) + if err != nil { + return false, err + } + + historyLocation = historyLocation.next.Value() + } else { + // The pipe head then becomes the schema version migration to the next version + // sourcing from any documents at schemaVersionID, or lower schema versions. + // This also ensures each document only passes through each migration once, + // in order, and through the same state container (in case migrations use state). 
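Stepping back from the pipe construction for a moment, the external contract of Lens is small: feed documents in with Put and draw their migrated forms out with Next/Value. A hedged usage sketch (drainLens, srcVersion and docs are hypothetical names; constructing a Lens is internal to this package):

func drainLens(l Lens, srcVersion schemaVersionID, docs []LensDoc) ([]LensDoc, error) {
	var out []LensDoc
	for _, doc := range docs {
		// Feed the source-version document in...
		if err := l.Put(srcVersion, doc); err != nil {
			return nil, err
		}
		// ...and pull its migrated form back out of the output pipe.
		hasValue, err := l.Next()
		if err != nil {
			return nil, err
		}
		if !hasValue {
			continue
		}
		migrated, err := l.Value()
		if err != nil {
			return nil, err
		}
		out = append(out, migrated)
	}
	return out, nil
}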
+ pipeHead, err = l.lensRegistry.MigrateDown(junctionPipe, historyLocation.schemaVersionID) + if err != nil { + return false, err + } + + // Aquire a lens migration from the registery, using the junctionPipe as its source. + // The new pipeHead will then be connected as a source to the next migration-stage on + // the next loop. + historyLocation = historyLocation.previous.Value() + } + } + } + + // Place the current doc in the appropriate input pipe + err = inputPipe.Put(doc.Doc) + if err != nil { + return false, err + } + + // Then draw out the next result result from the output pipe, pulling it through any migrations + // along the way. Typically this will be the (now migrated) document just placed into the input pipe. + return l.outputPipe.Next() +} + +func (l *lens) Value() (LensDoc, error) { + return l.outputPipe.Value() +} + +func (l *lens) Reset() { + l.outputPipe.Reset() +} diff --git a/lens/registry.go b/lens/registry.go new file mode 100644 index 0000000000..aee26104ef --- /dev/null +++ b/lens/registry.go @@ -0,0 +1,365 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package lens + +import ( + "context" + "encoding/json" + "sync" + + "github.com/ipfs/go-datastore/query" + "github.com/lens-vm/lens/host-go/config" + "github.com/lens-vm/lens/host-go/engine/module" + "github.com/lens-vm/lens/host-go/runtimes/wazero" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/errors" +) + +// todo: This file, particularly the `lensPool` stuff, contains fairly sensitive code that is both +// cumbersome to fully test with integration/benchmark tests, and can have a significant affect on +// the users if broken (deadlocks, large performance degradation). It should have proper unit tests. +// https://github.com/sourcenetwork/defradb/issues/1596 + +// lensRegistry is responsible for managing all migration related state within a local +// database instance. +type lensRegistry struct { + poolSize int + + // The runtime used to execute lens wasm modules. + runtime module.Runtime + + // The modules by file path used to instantiate lens wasm module instances. + modulesByPath map[string]module.Module + moduleLock sync.Mutex + + lensPoolsBySchemaVersionID map[string]*lensPool + + // lens configurations by source schema version ID + configs map[string]client.LensConfig +} + +var _ client.LensRegistry = (*lensRegistry)(nil) + +// DefaultPoolSize is the default size of the lens pool for each schema version. +const DefaultPoolSize int = 5 + +// NewRegistry instantiates a new registery. +// +// It will be of size 5 (per schema version) if a size is not provided. 
+func NewRegistry(lensPoolSize immutable.Option[int]) *lensRegistry { + var size int + if lensPoolSize.HasValue() { + size = lensPoolSize.Value() + } else { + size = DefaultPoolSize + } + + return &lensRegistry{ + poolSize: size, + runtime: wazero.New(), + modulesByPath: map[string]module.Module{}, + lensPoolsBySchemaVersionID: map[string]*lensPool{}, + configs: map[string]client.LensConfig{}, + } +} + +func (r *lensRegistry) SetMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error { + key := core.NewSchemaVersionMigrationKey(cfg.SourceSchemaVersionID) + + json, err := json.Marshal(cfg) + if err != nil { + return err + } + + err = txn.Systemstore().Put(ctx, key.ToDS(), json) + if err != nil { + return err + } + + err = r.cacheLens(txn, cfg) + if err != nil { + return err + } + + return nil +} + +func (r *lensRegistry) cacheLens(txn datastore.Txn, cfg client.LensConfig) error { + locker, lockerAlreadyExists := r.lensPoolsBySchemaVersionID[cfg.SourceSchemaVersionID] + if !lockerAlreadyExists { + locker = r.newPool(r.poolSize, cfg) + } + + newLensPipes := make([]*lensPipe, r.poolSize) + for i := 0; i < r.poolSize; i++ { + var err error + newLensPipes[i], err = r.newLensPipe(cfg) + if err != nil { + return err + } + } + + // todo - handling txns like this means that the migrations are not available within the current + // transaction if used for stuff (e.g. GQL requests) before commit. + // https://github.com/sourcenetwork/defradb/issues/1592 + txn.OnSuccess(func() { + if !lockerAlreadyExists { + r.lensPoolsBySchemaVersionID[cfg.SourceSchemaVersionID] = locker + } + + drainLoop: + for { + select { + case <-locker.pipes: + default: + break drainLoop + } + } + + for _, lensPipe := range newLensPipes { + locker.returnLens(lensPipe) + } + + r.configs[cfg.SourceSchemaVersionID] = cfg + }) + + return nil +} + +func (r *lensRegistry) ReloadLenses(ctx context.Context, txn datastore.Txn) error { + prefix := core.NewSchemaVersionMigrationKey("") + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: prefix.ToString(), + }) + if err != nil { + return err + } + + for res := range q.Next() { + // check for Done on context first + select { + case <-ctx.Done(): + // we've been cancelled! ;) + err = q.Close() + if err != nil { + return err + } + + return nil + default: + // noop, just continue on the with the for loop + } + + if res.Error != nil { + err = q.Close() + if err != nil { + return errors.Wrap(err.Error(), res.Error) + } + return res.Error + } + + var cfg client.LensConfig + err = json.Unmarshal(res.Value, &cfg) + if err != nil { + err = q.Close() + if err != nil { + return err + } + return err + } + + err = r.cacheLens(txn, cfg) + if err != nil { + err = q.Close() + if err != nil { + return errors.Wrap(err.Error(), res.Error) + } + return err + } + } + + err = q.Close() + if err != nil { + return err + } + + return nil +} + +func (r *lensRegistry) MigrateUp( + src enumerable.Enumerable[LensDoc], + schemaVersionID string, +) (enumerable.Enumerable[LensDoc], error) { + lensPool, ok := r.lensPoolsBySchemaVersionID[schemaVersionID] + if !ok { + // If there are no migrations for this schema version, just return the given source. 
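A hedged sketch of how a migration might be registered against the registry (the function name and pool size are hypothetical, and the client.LensConfig is assumed to have been built elsewhere with its wasm module description populated):

func exampleSetMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error {
	// Pool of 10 lens instances per schema version; immutable.None[int]()
	// would fall back to DefaultPoolSize (5).
	registry := NewRegistry(immutable.Some(10))

	// Persists cfg under its source schema version and, once txn commits,
	// swaps a freshly instantiated pool of wasm lenses into the registry
	// (see the txn.OnSuccess callback in cacheLens).
	return registry.SetMigration(ctx, txn, cfg)
}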
+ return src, nil + } + + lens, err := lensPool.borrow() + if err != nil { + return nil, err + } + + lens.SetSource(src) + + return lens, nil +} + +func (*lensRegistry) MigrateDown( + src enumerable.Enumerable[LensDoc], + schemaVersionID string, +) (enumerable.Enumerable[LensDoc], error) { + // todo: https://github.com/sourcenetwork/defradb/issues/1591 + return src, nil +} + +func (r *lensRegistry) Config() []client.LensConfig { + result := []client.LensConfig{} + for _, cfg := range r.configs { + result = append(result, cfg) + } + return result +} + +func (r *lensRegistry) HasMigration(schemaVersionID string) bool { + _, hasMigration := r.lensPoolsBySchemaVersionID[schemaVersionID] + return hasMigration +} + +// lensPool provides a pool-like mechanic for caching a limited number of wasm lens modules in +// a thread safe fashion. +// +// Instanstiating a lens module is pretty expensive as it has to spin up the wasm runtime environment +// so we need to limit how frequently we do this. +type lensPool struct { + // The config used to create the lenses within this locker. + cfg client.LensConfig + + registry *lensRegistry + + // Using a buffered channel provides an easy way to manage a finite + // number of lenses. + // + // We wish to limit this as creating lenses is expensive, and we do not want + // to be dynamically resizing this collection and spinning up new lens instances + // in user time, or holding on to large numbers of them. + pipes chan *lensPipe +} + +func (r *lensRegistry) newPool(lensPoolSize int, cfg client.LensConfig) *lensPool { + return &lensPool{ + cfg: cfg, + registry: r, + pipes: make(chan *lensPipe, lensPoolSize), + } +} + +// borrow attempts to borrow a module from the locker, if one is not available +// it will return a new, temporary instance that will not be returned to the locker +// after use. +func (l *lensPool) borrow() (enumerable.Socket[LensDoc], error) { + select { + case lens := <-l.pipes: + return &borrowedEnumerable{ + source: lens, + pool: l, + }, nil + default: + // If there are no free cached migrations within the locker, create a new temporary one + // instead of blocking. + return l.registry.newLensPipe(l.cfg) + } +} + +// returnLens returns a borrowed module to the locker, allowing it to be reused by other contexts. +func (l *lensPool) returnLens(lens *lensPipe) { + l.pipes <- lens +} + +// borrowedEnumerable is an enumerable tied to a locker. +// +// it exposes the source enumerable and amends the Reset function so that when called, the source +// pipe is returned to the locker. +type borrowedEnumerable struct { + source *lensPipe + pool *lensPool +} + +var _ enumerable.Socket[LensDoc] = (*borrowedEnumerable)(nil) + +func (s *borrowedEnumerable) SetSource(newSource enumerable.Enumerable[LensDoc]) { + s.source.SetSource(newSource) +} + +func (s *borrowedEnumerable) Next() (bool, error) { + return s.source.Next() +} + +func (s *borrowedEnumerable) Value() (LensDoc, error) { + return s.source.Value() +} + +func (s *borrowedEnumerable) Reset() { + s.pool.returnLens(s.source) + s.source.Reset() +} + +// lensPipe provides a mechanic where the underlying wasm module can be hidden from consumers +// and allow input sources to be swapped in and out as different actors borrow it from the locker. 
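The lifecycle around the pool is worth spelling out, since Reset on the borrowed wrapper is what hands the pipe back for reuse. A minimal in-package sketch (runThroughPool and its arguments are hypothetical):

func runThroughPool(pool *lensPool, src enumerable.Enumerable[LensDoc]) ([]LensDoc, error) {
	// borrow never blocks: if the pool is empty it hands back a temporary,
	// un-pooled pipe instead.
	pipe, err := pool.borrow()
	if err != nil {
		return nil, err
	}
	// For a pooled pipe, Reset is what returns it to the pool for reuse.
	defer pipe.Reset()

	pipe.SetSource(src)

	var out []LensDoc
	for {
		hasNext, err := pipe.Next()
		if err != nil {
			return nil, err
		}
		if !hasNext {
			return out, nil
		}
		value, err := pipe.Value()
		if err != nil {
			return nil, err
		}
		out = append(out, value)
	}
}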
+type lensPipe struct { + input enumerable.Socket[LensDoc] + enumerable enumerable.Enumerable[LensDoc] +} + +var _ enumerable.Socket[LensDoc] = (*lensPipe)(nil) + +func (r *lensRegistry) newLensPipe(cfg client.LensConfig) (*lensPipe, error) { + socket := enumerable.NewSocket[LensDoc]() + + r.moduleLock.Lock() + enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg.Lens, socket) + r.moduleLock.Unlock() + + if err != nil { + return nil, err + } + + return &lensPipe{ + input: socket, + enumerable: enumerable, + }, nil +} + +func (p *lensPipe) SetSource(newSource enumerable.Enumerable[LensDoc]) { + p.input.SetSource(newSource) +} + +func (p *lensPipe) Next() (bool, error) { + return p.enumerable.Next() +} + +func (p *lensPipe) Value() (LensDoc, error) { + return p.enumerable.Value() +} + +func (p *lensPipe) Reset() { + p.input.Reset() + // WARNING: Currently the wasm module state is not reset by calling reset on the enumerable + // this means that state from one context may leak to the next useage. There is a ticket here + // to fix this: https://github.com/lens-vm/lens/issues/46 + p.enumerable.Reset() +} diff --git a/licenses/BSL.txt b/licenses/BSL.txt index 54cfbb9150..093935cc02 100644 --- a/licenses/BSL.txt +++ b/licenses/BSL.txt @@ -7,7 +7,7 @@ Parameters Licensor: Democratized Data (D2) Foundation -Licensed Work: DefraDB v0.5.1 +Licensed Work: DefraDB v0.6.0 The Licensed Work is (c) 2023 D2 Foundation. @@ -28,7 +28,7 @@ Additional Use Grant: You may only use the Licensed Work for the -Change Date: 2027-05-16 +Change Date: 2027-07-31 Change License: Apache License, Version 2.0 diff --git a/logging/config.go b/logging/config.go index b6ccccc50d..63cde2ceb5 100644 --- a/logging/config.go +++ b/logging/config.go @@ -110,7 +110,7 @@ type Config struct { OutputPaths []string OverridesByLoggerName map[string]Config - pipe io.Writer // this is used for testing purposes only + Pipe io.Writer // this is used for testing purposes only } func (c Config) forLogger(name string) Config { @@ -121,7 +121,7 @@ func (c Config) forLogger(name string) Config { EnableCaller: c.EnableCaller, EncoderFormat: c.EncoderFormat, OutputPaths: c.OutputPaths, - pipe: c.pipe, + Pipe: c.Pipe, } if override, hasOverride := c.OverridesByLoggerName[name]; hasOverride { @@ -143,8 +143,8 @@ func (c Config) forLogger(name string) Config { if len(override.OutputPaths) != 0 { loggerConfig.OutputPaths = override.OutputPaths } - if override.pipe != nil { - loggerConfig.pipe = override.pipe + if override.Pipe != nil { + loggerConfig.Pipe = override.Pipe } } @@ -161,7 +161,7 @@ func (c Config) copy() Config { EnableCaller: o.EnableCaller, DisableColor: o.DisableColor, OutputPaths: o.OutputPaths, - pipe: o.pipe, + Pipe: o.Pipe, } } @@ -173,7 +173,7 @@ func (c Config) copy() Config { EnableCaller: c.EnableCaller, DisableColor: c.DisableColor, OverridesByLoggerName: overridesByLoggerName, - pipe: c.pipe, + Pipe: c.Pipe, } } @@ -205,8 +205,8 @@ func (oldConfig Config) with(newConfigOptions Config) Config { newConfig.OutputPaths = validatePaths(newConfigOptions.OutputPaths) } - if newConfigOptions.pipe != nil { - newConfig.pipe = newConfigOptions.pipe + if newConfigOptions.Pipe != nil { + newConfig.Pipe = newConfigOptions.Pipe } for k, o := range newConfigOptions.OverridesByLoggerName { @@ -219,7 +219,7 @@ func (oldConfig Config) with(newConfigOptions Config) Config { DisableColor: o.DisableColor, EncoderFormat: o.EncoderFormat, OutputPaths: validatePaths(o.OutputPaths), - pipe: o.pipe, + Pipe: o.Pipe, } } 
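For reference, exporting Pipe is what lets code outside the logging package capture console output during tests. A minimal sketch of that usage; MustNewLogger, ApplyConfig and the Info signature are assumed from the package's existing public API and may differ:

func captureLogOutput() *bytes.Buffer {
	buf := &bytes.Buffer{}
	log := logging.MustNewLogger("example") // assumed existing constructor
	log.ApplyConfig(logging.Config{
		EncoderFormat: logging.NewEncoderFormatOption(logging.JSON),
		Pipe:          buf, // now exported, so settable from outside the package
	})
	log.Info(context.Background(), "hello") // assumed signature
	// buf now holds the JSON-encoded log line.
	return buf
}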
diff --git a/logging/logger.go b/logging/logger.go index cb416e3876..9b9bb20e35 100644 --- a/logging/logger.go +++ b/logging/logger.go @@ -172,8 +172,8 @@ func (l *logger) ApplyConfig(config Config) { l.logger = newLogger if !willOutputToStderrOrStdout(config.OutputPaths) { - if config.pipe != nil { // for testing purposes only - l.consoleLogger = stdlog.New(config.pipe, "", 0) + if config.Pipe != nil { // for testing purposes only + l.consoleLogger = stdlog.New(config.Pipe, "", 0) } else { l.consoleLogger = stdlog.New(os.Stderr, "", 0) } @@ -245,7 +245,7 @@ func buildZapLogger(name string, config Config) (*zap.Logger, error) { return nil, err } - if willOutputToStderrOrStdout(defaultConfig.OutputPaths) && config.pipe != nil { + if willOutputToStderrOrStdout(defaultConfig.OutputPaths) && config.Pipe != nil { newLogger = newLogger.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { cfg := zap.NewProductionEncoderConfig() cfg.ConsoleSeparator = defaultConfig.EncoderConfig.ConsoleSeparator @@ -253,7 +253,7 @@ func buildZapLogger(name string, config Config) (*zap.Logger, error) { cfg.EncodeLevel = defaultConfig.EncoderConfig.EncodeLevel return zapcore.NewCore( zapcore.NewJSONEncoder(cfg), - zapcore.AddSync(config.pipe), + zapcore.Lock(zapcore.AddSync(config.Pipe)), zap.NewAtomicLevelAt(zapcore.Level(config.Level.LogLevel)), ) })) diff --git a/logging/logging_test.go b/logging/logging_test.go index 9ffb7d66d6..0c776ffa33 100644 --- a/logging/logging_test.go +++ b/logging/logging_test.go @@ -379,7 +379,7 @@ func TestLogDoesntWriteMessagesToLogGivenNoLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -416,7 +416,7 @@ func TestLogDoesntWriteMessagesToLogGivenNotFoundLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{"/path/not/found"} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -453,7 +453,7 @@ func TestLogDoesntWriteMessagesToLogGivenStderrLogPath(t *testing.T) { logger, _ := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(tc.LogLevel) c.OutputPaths = []string{stderr} - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -568,7 +568,7 @@ func TestLogWritesMessagesToFeedbackLog(t *testing.T) { c.Level = NewLogLevelOption(tc.LogLevel) c.EnableStackTrace = NewEnableStackTraceOption(tc.WithStackTrace) c.EnableCaller = NewEnableCallerOption(tc.WithCaller) - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -613,7 +613,7 @@ func TestLogWritesMessagesToLogGivenPipeWithValidPath(t *testing.T) { b := &bytes.Buffer{} logger, logPath := getLogger(t, func(c *Config) { c.Level = NewLogLevelOption(Info) - c.pipe = b + c.Pipe = b }) logMessage := "test log message" @@ -874,7 +874,7 @@ func TestGetGoLoggerAndApplyConfig(t *testing.T) { b := &bytes.Buffer{} l.ApplyConfig(Config{ EncoderFormat: NewEncoderFormatOption(JSON), - pipe: b, + Pipe: b, }) l.ZapEventLogger.Info("some info") @@ -906,7 +906,7 @@ func TestGetGoLoggerV2AndApplyConfig(t *testing.T) { b := &bytes.Buffer{} l.ApplyConfig(Config{ EncoderFormat: NewEncoderFormatOption(JSON), - pipe: b, + Pipe: b, }) l.ZapEventLogger.Info("some info") diff --git a/merkle/clock/clock_test.go b/merkle/clock/clock_test.go index 2624335bd8..8cee13a2bb 100644 --- a/merkle/clock/clock_test.go +++ b/merkle/clock/clock_test.go @@ -16,9 +16,9 @@ import ( cid "github.com/ipfs/go-cid" 
ds "github.com/ipfs/go-datastore" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/core/crdt" "github.com/sourcenetwork/defradb/datastore" ) @@ -79,15 +79,7 @@ func TestMerkleClockPutBlockWithHeads(t *testing.T) { delta := &crdt.LWWRegDelta{ Data: []byte("test"), } - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - - // And then feed it some data - c, err := pref.Sum([]byte("Hello World!")) + c, err := ccid.NewSHA256CidV1([]byte("Hello World!")) if err != nil { t.Error("Failed to create new head CID:", err) return diff --git a/merkle/clock/heads_test.go b/merkle/clock/heads_test.go index c9c6212c5c..a857571515 100644 --- a/merkle/clock/heads_test.go +++ b/merkle/clock/heads_test.go @@ -20,26 +20,19 @@ import ( "testing" "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" "github.com/sourcenetwork/defradb/core" + ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" ) func newRandomCID() cid.Cid { - pref := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length - } - // And then feed it some data bs := make([]byte, 4) i := rand.Uint32() binary.LittleEndian.PutUint32(bs, i) - c, err := pref.Sum(bs) + c, err := ccid.NewSHA256CidV1(bs) if err != nil { return cid.Undef } diff --git a/merkle/clock/ipld.go b/merkle/clock/ipld.go index e982ced1ee..484a145dce 100644 --- a/merkle/clock/ipld.go +++ b/merkle/clock/ipld.go @@ -27,10 +27,6 @@ import ( var _ core.NodeGetter = (*CrdtNodeGetter)(nil) -func init() { - ipld.Register(cid.DagProtobuf, dag.DecodeProtobufBlock) -} - type DeltaExtractorFn func(ipld.Node) (core.Delta, error) // crdtNodeGetter wraps an ipld.NodeGetter with some additional utility methods diff --git a/metric/metric.go b/metric/metric.go index f8eb82d699..f267a7ed5d 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -20,9 +20,8 @@ import ( "encoding/json" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" otelMetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" ) var _ Metric = (*Meter)(nil) @@ -35,7 +34,7 @@ type Metric interface { Register(name string) // Dump is responsible to read the metrics and output all the gathered data. - Dump(ctx context.Context) (any, error) + Dump(ctx context.Context) (*metricdata.ResourceMetrics, error) // Close shutsdown the meter. Close(ctx context.Context) error @@ -61,8 +60,12 @@ func (m *Meter) Register(name string) { } // Dump is responsible to read the metrics and output all the gathered data. -func (m *Meter) Dump(ctx context.Context) (any, error) { - return m.reader.Collect(ctx) +func (m *Meter) Dump(ctx context.Context) (*metricdata.ResourceMetrics, error) { + out := &metricdata.ResourceMetrics{} + if err := m.reader.Collect(ctx, out); err != nil { + return nil, err + } + return out, nil } // Close shutsdown the meter. @@ -73,33 +76,33 @@ func (m *Meter) Close(ctx context.Context) error { // GetSyncHistogram returns a new histogram with the given name and unit. 
func (m *Meter) GetSyncHistogram( name string, - unit unit.Unit, -) (instrument.Int64Histogram, error) { + unit string, +) (metric.Int64Histogram, error) { return m.meter.Int64Histogram( name, - instrument.WithUnit(unit), + metric.WithUnit(unit), ) } // GetSyncCounter returns a new counter with the given name and unit. func (m *Meter) GetSyncCounter( name string, - unit unit.Unit, -) (instrument.Int64Counter, error) { + unit string, +) (metric.Int64Counter, error) { return m.meter.Int64Counter( name, - instrument.WithUnit(unit), + metric.WithUnit(unit), ) } // DumpScopeMetricsString returns a string representation of the metrics. func (m *Meter) DumpScopeMetricsString(ctx context.Context) (string, error) { - data, err := m.reader.Collect(ctx) - if err != nil { + out := &metricdata.ResourceMetrics{} + if err := m.reader.Collect(ctx, out); err != nil { return "", err } - jsonBytes, err := json.MarshalIndent(data.ScopeMetrics, "", " ") + jsonBytes, err := json.MarshalIndent(out.ScopeMetrics, "", " ") if err != nil { return "", err } diff --git a/metric/metric_test.go b/metric/metric_test.go index a8af66414d..b89700fd74 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -16,7 +16,6 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/otel/metric/unit" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -25,7 +24,7 @@ func TestMetricSyncHistogram(t *testing.T) { meter.Register("HistogramOnly") workDuration, err := meter.GetSyncHistogram( "workDuration", - unit.Milliseconds, + "ms", ) if err != nil { t.Error(err) @@ -46,30 +45,25 @@ func TestMetricSyncHistogram(t *testing.T) { // Goes in third bucket. workDuration.Record(ctx, elapsedTime.Nanoseconds()) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "HistogramOnly", data.ScopeMetrics[0].Scope.Name) assert.Equal(t, 1, len(data.ScopeMetrics[0].Metrics)) assert.Equal(t, "workDuration", data.ScopeMetrics[0].Metrics[0].Name) firstMetricData := data.ScopeMetrics[0].Metrics[0].Data - histData, isHistData := firstMetricData.(metricdata.Histogram) + histData, isHistData := firstMetricData.(metricdata.Histogram[int64]) if !isHistData { t.Error(err) } assert.Equal(t, 1, len(histData.DataPoints)) assert.Equal(t, uint64(3), histData.DataPoints[0].Count) - assert.Equal(t, 12.0, histData.DataPoints[0].Sum) // 2 + 4 + 6 + assert.Equal(t, int64(12), histData.DataPoints[0].Sum) // 2 + 4 + 6 assert.Equal( t, []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, @@ -91,7 +85,7 @@ func TestMetricSyncCounter(t *testing.T) { meter.Register("CounterOnly") stuffCounter, err := meter.GetSyncCounter( "countStuff", - unit.Dimensionless, + "1", ) if err != nil { t.Error(err) @@ -101,16 +95,11 @@ func TestMetricSyncCounter(t *testing.T) { stuffCounter.Add(ctx, 12) stuffCounter.Add(ctx, 1) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "CounterOnly", data.ScopeMetrics[0].Scope.Name) assert.Equal(t, 1, len(data.ScopeMetrics[0].Metrics)) @@ -137,7 +126,7 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { stuffCounter, err := meter.GetSyncCounter( "countStuff", - 
unit.Dimensionless, + "1", ) if err != nil { t.Error(err) @@ -145,7 +134,7 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { workDuration, err := meter.GetSyncHistogram( "workDuration", - unit.Milliseconds, + "ms", ) if err != nil { t.Error(err) @@ -166,16 +155,11 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { stuffCounter.Add(ctx, 1) - dump, err := meter.Dump(ctx) + data, err := meter.Dump(ctx) if err != nil { t.Error(err) } - data, isMatricData := dump.(metricdata.ResourceMetrics) - if !isMatricData { - t.Error(err) - } - assert.Equal(t, 1, len(data.ScopeMetrics)) assert.Equal(t, "CounterAndHistogram", data.ScopeMetrics[0].Scope.Name) @@ -197,14 +181,14 @@ func TestMetricWithCounterAndHistogramIntrumentOnOneMeter(t *testing.T) { assert.Equal(t, "workDuration", metrics[1].Name) histMetricData := metrics[1].Data - histData, isHistData := histMetricData.(metricdata.Histogram) + histData, isHistData := histMetricData.(metricdata.Histogram[int64]) if !isHistData { t.Error(err) } assert.Equal(t, 1, len(histData.DataPoints)) assert.Equal(t, uint64(3), histData.DataPoints[0].Count) - assert.Equal(t, 12.0, histData.DataPoints[0].Sum) // 2 + 4 + 6 + assert.Equal(t, int64(12), histData.DataPoints[0].Sum) // 2 + 4 + 6 assert.Equal( t, []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, diff --git a/net/api/client/client.go b/net/api/client/client.go index b6bfe5e386..2ea92bd14c 100644 --- a/net/api/client/client.go +++ b/net/api/client/client.go @@ -18,15 +18,22 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" + codec "github.com/planetscale/vtprotobuf/codec/grpc" "google.golang.org/grpc" + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - pb "github.com/sourcenetwork/defradb/net/api/pb" + pb "github.com/sourcenetwork/defradb/net/pb" ) +func init() { + encoding.RegisterCodec(codec.Codec{}) +} + type Client struct { - c pb.ServiceClient + c pb.CollectionClient conn *grpc.ClientConn } @@ -38,7 +45,7 @@ func NewClient(target string, opts ...grpc.DialOption) (*Client, error) { } return &Client{ - c: pb.NewServiceClient(conn), + c: pb.NewCollectionClient(conn), conn: conn, }, nil } diff --git a/net/api/pb/Makefile b/net/api/pb/Makefile index e96e192c5a..62eef77354 100644 --- a/net/api/pb/Makefile +++ b/net/api/pb/Makefile @@ -4,9 +4,12 @@ GO = $(PB:.proto=.pb.go) all: $(GO) %.pb.go: %.proto - protoc -I=. -I=$(GOPATH)/src -I=$(GOPATH)/src/github.com/gogo/protobuf/protobuf --gogofaster_out=\ - plugins=grpc:\ - . $< + protoc \ + --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ + --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ + --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ + --go-vtproto_opt=features=marshal+unmarshal+size \ + $< clean: rm -f *.pb.go diff --git a/net/api/pb/api.pb.go b/net/api/pb/api.pb.go index e34f954bc9..ad48069b8f 100644 --- a/net/api/pb/api.pb.go +++ b/net/api/pb/api.pb.go @@ -1,3150 +1,1100 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.9 // source: api.proto package api_pb import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type SetReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` } -func (m *SetReplicatorRequest) Reset() { *m = SetReplicatorRequest{} } -func (m *SetReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*SetReplicatorRequest) ProtoMessage() {} -func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *SetReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SetReplicatorRequest) Reset() { + *x = SetReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SetReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetReplicatorRequest.Merge(m, src) -} -func (m *SetReplicatorRequest) XXX_Size() int { - return m.Size() + +func (x *SetReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetReplicatorRequest.DiscardUnknown(m) + +func (*SetReplicatorRequest) ProtoMessage() {} + +func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetReplicatorRequest proto.InternalMessageInfo +// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{0} +} -func (m *SetReplicatorRequest) GetCollections() []string { - if m != nil { - return m.Collections +func (x *SetReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } return nil } -func (m *SetReplicatorRequest) GetAddr() []byte { - if m != nil { - return m.Addr +func (x *SetReplicatorRequest) GetAddr() []byte { + if x != nil { + return x.Addr } return nil } type SetReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *SetReplicatorReply) Reset() { *m = SetReplicatorReply{} } -func (m *SetReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*SetReplicatorReply) ProtoMessage() {} -func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *SetReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SetReplicatorReply) Reset() { + *x = SetReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SetReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetReplicatorReply.Merge(m, src) -} -func (m *SetReplicatorReply) XXX_Size() int { - return m.Size() + +func (x *SetReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetReplicatorReply.DiscardUnknown(m) + +func (*SetReplicatorReply) ProtoMessage() {} + +func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetReplicatorReply proto.InternalMessageInfo +// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{1} +} -func (m *SetReplicatorReply) GetPeerID() []byte { - if m != nil { - return m.PeerID +func (x *SetReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID } return nil } type DeleteReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *DeleteReplicatorRequest) Reset() { *m = DeleteReplicatorRequest{} } -func (m *DeleteReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteReplicatorRequest) ProtoMessage() {} -func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *DeleteReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorRequest) Reset() { + *x = DeleteReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteReplicatorRequest.Merge(m, src) -} -func (m *DeleteReplicatorRequest) XXX_Size() int { - return m.Size() + +func (x *DeleteReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteReplicatorRequest.DiscardUnknown(m) + +func (*DeleteReplicatorRequest) ProtoMessage() {} + +func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteReplicatorRequest proto.InternalMessageInfo +// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{2} +} -func (m *DeleteReplicatorRequest) GetPeerID() []byte { - if m != nil { - return m.PeerID +func (x *DeleteReplicatorRequest) GetPeerID() []byte { + if x != nil { + return x.PeerID } return nil } type DeleteReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *DeleteReplicatorReply) Reset() { *m = DeleteReplicatorReply{} } -func (m *DeleteReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*DeleteReplicatorReply) ProtoMessage() {} -func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{3} -} -func (m *DeleteReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorReply) Reset() { + *x = DeleteReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteReplicatorReply.Merge(m, src) -} -func (m *DeleteReplicatorReply) XXX_Size() int { - return m.Size() -} -func (m *DeleteReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteReplicatorReply.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteReplicatorReply proto.InternalMessageInfo -func (m *DeleteReplicatorReply) GetPeerID() []byte { - if m != nil { - return m.PeerID - } - return nil +func (x *DeleteReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type GetAllReplicatorRequest struct { -} +func (*DeleteReplicatorReply) ProtoMessage() {} -func (m *GetAllReplicatorRequest) Reset() { *m = GetAllReplicatorRequest{} } -func (m *GetAllReplicatorRequest) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorRequest) ProtoMessage() {} -func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{4} -} -func (m *GetAllReplicatorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *GetAllReplicatorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorRequest.Merge(m, src) -} -func (m *GetAllReplicatorRequest) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var 
xxx_messageInfo_GetAllReplicatorRequest proto.InternalMessageInfo - -type GetAllReplicatorReply struct { - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` +// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{3} } -func (m *GetAllReplicatorReply) Reset() { *m = GetAllReplicatorReply{} } -func (m *GetAllReplicatorReply) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply) ProtoMessage() {} -func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5} -} -func (m *GetAllReplicatorReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID } -} -func (m *GetAllReplicatorReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply.Merge(m, src) -} -func (m *GetAllReplicatorReply) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply.DiscardUnknown(m) + return nil } -var xxx_messageInfo_GetAllReplicatorReply proto.InternalMessageInfo +type GetAllReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} -func (m *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if m != nil { - return m.Replicators +func (x *GetAllReplicatorRequest) Reset() { + *x = GetAllReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetAllReplicatorReply_Replicators struct { - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` +func (x *GetAllReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetAllReplicatorReply_Replicators) Reset() { *m = GetAllReplicatorReply_Replicators{} } -func (m *GetAllReplicatorReply_Replicators) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} -func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5, 0} -} -func (m *GetAllReplicatorReply_Replicators) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply_Replicators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply_Replicators.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetAllReplicatorRequest) ProtoMessage() {} + +func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetAllReplicatorReply_Replicators) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply_Replicators.Merge(m, src) -} -func (m *GetAllReplicatorReply_Replicators) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply_Replicators) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply_Replicators.DiscardUnknown(m) + +// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{4} } -var xxx_messageInfo_GetAllReplicatorReply_Replicators proto.InternalMessageInfo +type GetAllReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if m != nil { - return m.Info - } - return nil + Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` } -func (m *GetAllReplicatorReply_Replicators) GetSchemas() []string { - if m != nil { - return m.Schemas +func (x *GetAllReplicatorReply) Reset() { + *x = GetAllReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetAllReplicatorReply_Replicators_Info struct { - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` +func (x *GetAllReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetAllReplicatorReply_Replicators_Info) Reset() { - *m = GetAllReplicatorReply_Replicators_Info{} -} -func (m *GetAllReplicatorReply_Replicators_Info) String() string { return proto.CompactTextString(m) } -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} -func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{5, 0, 0} -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetAllReplicatorReply) ProtoMessage() {} + +func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.Merge(m, src) -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_Size() int { - return m.Size() -} -func (m *GetAllReplicatorReply_Replicators_Info) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllReplicatorReply_Replicators_Info.DiscardUnknown(m) + return mi.MessageOf(x) } -var 
xxx_messageInfo_GetAllReplicatorReply_Replicators_Info proto.InternalMessageInfo - -func (m *GetAllReplicatorReply_Replicators_Info) GetId() []byte { - if m != nil { - return m.Id - } - return nil +// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{5} } -func (m *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { - if m != nil { - return m.Addrs +func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { + if x != nil { + return x.Replicators } return nil } type AddP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *AddP2PCollectionsRequest) Reset() { *m = AddP2PCollectionsRequest{} } -func (m *AddP2PCollectionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddP2PCollectionsRequest) ProtoMessage() {} -func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{6} -} -func (m *AddP2PCollectionsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddP2PCollectionsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *AddP2PCollectionsRequest) Reset() { + *x = AddP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *AddP2PCollectionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddP2PCollectionsRequest.Merge(m, src) -} -func (m *AddP2PCollectionsRequest) XXX_Size() int { - return m.Size() + +func (x *AddP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AddP2PCollectionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddP2PCollectionsRequest.DiscardUnknown(m) + +func (*AddP2PCollectionsRequest) ProtoMessage() {} + +func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_AddP2PCollectionsRequest proto.InternalMessageInfo +// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{6} +} -func (m *AddP2PCollectionsRequest) GetCollections() []string { - if m != nil { - return m.Collections +func (x *AddP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections } return nil } type AddP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *AddP2PCollectionsReply) Reset() { *m = AddP2PCollectionsReply{} } -func (m *AddP2PCollectionsReply) String() string { return proto.CompactTextString(m) } -func (*AddP2PCollectionsReply) ProtoMessage() {} -func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{7} -} -func (m *AddP2PCollectionsReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddP2PCollectionsReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *AddP2PCollectionsReply) Reset() { + *x = AddP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *AddP2PCollectionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddP2PCollectionsReply.Merge(m, src) -} -func (m *AddP2PCollectionsReply) XXX_Size() int { - return m.Size() -} -func (m *AddP2PCollectionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_AddP2PCollectionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AddP2PCollectionsReply proto.InternalMessageInfo -func (m *AddP2PCollectionsReply) GetErr() string { - if m != nil { - return m.Err - } - return "" +func (x *AddP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type RemoveP2PCollectionsRequest struct { - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} +func (*AddP2PCollectionsReply) ProtoMessage() {} -func (m *RemoveP2PCollectionsRequest) Reset() { *m = RemoveP2PCollectionsRequest{} } -func (m *RemoveP2PCollectionsRequest) String() string { return proto.CompactTextString(m) } -func (*RemoveP2PCollectionsRequest) ProtoMessage() {} -func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{8} -} -func (m *RemoveP2PCollectionsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemoveP2PCollectionsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *RemoveP2PCollectionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveP2PCollectionsRequest.Merge(m, src) -} -func (m *RemoveP2PCollectionsRequest) XXX_Size() int 
{ - return m.Size() -} -func (m *RemoveP2PCollectionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveP2PCollectionsRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_RemoveP2PCollectionsRequest proto.InternalMessageInfo +// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. +func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{7} +} -func (m *RemoveP2PCollectionsRequest) GetCollections() []string { - if m != nil { - return m.Collections +func (x *AddP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } - return nil + return "" } -type RemoveP2PCollectionsReply struct { - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` +type RemoveP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *RemoveP2PCollectionsReply) Reset() { *m = RemoveP2PCollectionsReply{} } -func (m *RemoveP2PCollectionsReply) String() string { return proto.CompactTextString(m) } -func (*RemoveP2PCollectionsReply) ProtoMessage() {} -func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{9} -} -func (m *RemoveP2PCollectionsReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemoveP2PCollectionsReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RemoveP2PCollectionsRequest) Reset() { + *x = RemoveP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *RemoveP2PCollectionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveP2PCollectionsReply.Merge(m, src) -} -func (m *RemoveP2PCollectionsReply) XXX_Size() int { - return m.Size() -} -func (m *RemoveP2PCollectionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveP2PCollectionsReply.DiscardUnknown(m) + +func (x *RemoveP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_RemoveP2PCollectionsReply proto.InternalMessageInfo +func (*RemoveP2PCollectionsRequest) ProtoMessage() {} -func (m *RemoveP2PCollectionsReply) GetErr() string { - if m != nil { - return m.Err +func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type GetAllP2PCollectionsRequest struct { +// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{8} } -func (m *GetAllP2PCollectionsRequest) Reset() { *m = GetAllP2PCollectionsRequest{} } -func (m *GetAllP2PCollectionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetAllP2PCollectionsRequest) ProtoMessage() {} -func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{10} -} -func (m *GetAllP2PCollectionsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllP2PCollectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllP2PCollectionsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RemoveP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections } -} -func (m *GetAllP2PCollectionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllP2PCollectionsRequest.Merge(m, src) -} -func (m *GetAllP2PCollectionsRequest) XXX_Size() int { - return m.Size() -} -func (m *GetAllP2PCollectionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllP2PCollectionsRequest.DiscardUnknown(m) + return nil } -var xxx_messageInfo_GetAllP2PCollectionsRequest proto.InternalMessageInfo +type RemoveP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type GetAllP2PCollectionsReply struct { - Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *GetAllP2PCollectionsReply) Reset() { *m = GetAllP2PCollectionsReply{} } -func (m *GetAllP2PCollectionsReply) String() string { return proto.CompactTextString(m) } -func (*GetAllP2PCollectionsReply) ProtoMessage() {} -func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{11} -} -func (m *GetAllP2PCollectionsReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllP2PCollectionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllP2PCollectionsReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RemoveP2PCollectionsReply) Reset() { + *x = RemoveP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetAllP2PCollectionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllP2PCollectionsReply.Merge(m, src) -} -func (m *GetAllP2PCollectionsReply) XXX_Size() int { - return m.Size() -} -func (m *GetAllP2PCollectionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllP2PCollectionsReply.DiscardUnknown(m) -} -var xxx_messageInfo_GetAllP2PCollectionsReply proto.InternalMessageInfo - -func (m *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { - if m != nil { - return m.Collections - } - return nil +func (x *RemoveP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type GetAllP2PCollectionsReply_Collection struct { - Id string 
`protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` -} +func (*RemoveP2PCollectionsReply) ProtoMessage() {} -func (m *GetAllP2PCollectionsReply_Collection) Reset() { *m = GetAllP2PCollectionsReply_Collection{} } -func (m *GetAllP2PCollectionsReply_Collection) String() string { return proto.CompactTextString(m) } -func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} -func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{11, 0} -} -func (m *GetAllP2PCollectionsReply_Collection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllP2PCollectionsReply_Collection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllP2PCollectionsReply_Collection.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetAllP2PCollectionsReply_Collection) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllP2PCollectionsReply_Collection.Merge(m, src) -} -func (m *GetAllP2PCollectionsReply_Collection) XXX_Size() int { - return m.Size() -} -func (m *GetAllP2PCollectionsReply_Collection) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllP2PCollectionsReply_Collection.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAllP2PCollectionsReply_Collection proto.InternalMessageInfo -func (m *GetAllP2PCollectionsReply_Collection) GetId() string { - if m != nil { - return m.Id - } - return "" +// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{9} } -func (m *GetAllP2PCollectionsReply_Collection) GetName() string { - if m != nil { - return m.Name +func (x *RemoveP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } return "" } -func init() { - proto.RegisterType((*SetReplicatorRequest)(nil), "api.pb.SetReplicatorRequest") - proto.RegisterType((*SetReplicatorReply)(nil), "api.pb.SetReplicatorReply") - proto.RegisterType((*DeleteReplicatorRequest)(nil), "api.pb.DeleteReplicatorRequest") - proto.RegisterType((*DeleteReplicatorReply)(nil), "api.pb.DeleteReplicatorReply") - proto.RegisterType((*GetAllReplicatorRequest)(nil), "api.pb.GetAllReplicatorRequest") - proto.RegisterType((*GetAllReplicatorReply)(nil), "api.pb.GetAllReplicatorReply") - proto.RegisterType((*GetAllReplicatorReply_Replicators)(nil), "api.pb.GetAllReplicatorReply.Replicators") - proto.RegisterType((*GetAllReplicatorReply_Replicators_Info)(nil), "api.pb.GetAllReplicatorReply.Replicators.Info") - proto.RegisterType((*AddP2PCollectionsRequest)(nil), "api.pb.AddP2PCollectionsRequest") - proto.RegisterType((*AddP2PCollectionsReply)(nil), "api.pb.AddP2PCollectionsReply") - proto.RegisterType((*RemoveP2PCollectionsRequest)(nil), "api.pb.RemoveP2PCollectionsRequest") - proto.RegisterType((*RemoveP2PCollectionsReply)(nil), "api.pb.RemoveP2PCollectionsReply") - proto.RegisterType((*GetAllP2PCollectionsRequest)(nil), "api.pb.GetAllP2PCollectionsRequest") - proto.RegisterType((*GetAllP2PCollectionsReply)(nil), "api.pb.GetAllP2PCollectionsReply") - proto.RegisterType((*GetAllP2PCollectionsReply_Collection)(nil), "api.pb.GetAllP2PCollectionsReply.Collection") -} - -func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 524 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0x94, 0x40, - 0x14, 0x06, 0x8a, 0x6d, 0x78, 0xa8, 0x69, 0x5f, 0xb6, 0x2d, 0x4b, 0x2d, 0xae, 0xe3, 0xa5, 0x9a, - 0x15, 0x15, 0xaf, 0x26, 0xa6, 0xb5, 0x89, 0x69, 0x6a, 0x4c, 0x43, 0x35, 0x5e, 0xa5, 0xf0, 0x1a, - 0x49, 0xd8, 0x05, 0x01, 0x9b, 0xf4, 0xe6, 0x4f, 0xf0, 0xea, 0xc1, 0xc4, 0x9f, 0xe3, 0xb1, 0x47, - 0x8f, 0x66, 0xf7, 0x8f, 0x18, 0x86, 0xa5, 0xec, 0x2e, 0x03, 0x69, 0xbc, 0xcd, 0xcc, 0x7b, 0xdf, - 0xf7, 0x0d, 0xf3, 0x7d, 0x0f, 0xd0, 0xbc, 0x24, 0xb4, 0x93, 0x34, 0xce, 0x63, 0x5c, 0xe5, 0xcb, - 0x33, 0xf6, 0x16, 0x7a, 0xa7, 0x94, 0xbb, 0x94, 0x44, 0xa1, 0xef, 0xe5, 0x71, 0xea, 0xd2, 0x97, - 0xaf, 0x94, 0xe5, 0x38, 0x00, 0xdd, 0x8f, 0xa3, 0x88, 0xfc, 0x3c, 0x8c, 0xc7, 0x99, 0x21, 0x0f, - 0x56, 0xf6, 0x34, 0x77, 0xfe, 0x08, 0x11, 0x54, 0x2f, 0x08, 0x52, 0x43, 0x19, 0xc8, 0x7b, 0xb7, - 0x5d, 0xbe, 0x66, 0x43, 0xc0, 0x25, 0xb6, 0x24, 0xba, 0xc4, 0x2d, 0x58, 0x4d, 0x88, 0xd2, 0xa3, - 0x43, 0x43, 0xe6, 0xbd, 0xb3, 0x1d, 0x7b, 0x0e, 0xdb, 0x87, 0x14, 0x51, 0x4e, 0x4d, 0xf9, 0x36, - 0xc8, 0x53, 0xd8, 0x6c, 0x42, 0xba, 0x34, 0xfa, 0xb0, 0xfd, 0x86, 0xf2, 0xfd, 0x28, 0x6a, 0x68, - 0xb0, 0x6f, 0x0a, 0x6c, 0x36, 0x6b, 0x05, 0xd9, 0x31, 0xe8, 0xe9, 0xf5, 0x51, 0xf9, 0xf1, 0xba, - 0xf3, 0xc8, 0x2e, 0x9f, 0xcc, 0x16, 0x62, 0xec, 0x7a, 0x9f, 0xb9, 0xf3, 0x68, 0xf3, 0x87, 0x0c, - 0xfa, 0x5c, 0x11, 0x0f, 0x40, 0x0d, 0xc7, 0xe7, 0x31, 0xbf, 0xa7, 0xee, 0xd8, 0x37, 0x66, 0xb5, - 0x8f, 0xc6, 0xe7, 0xb1, 0xcb, 0xb1, 0x68, 0xc0, 0x5a, 0xe6, 0x7f, 0xa6, 0x91, 0x97, 0x19, 0x0a, - 0x77, 0xa6, 0xda, 0x9a, 0x43, 0x50, 0x8b, 0x3e, 0xbc, 0x0b, 0x4a, 0x18, 
0xcc, 0xde, 0x42, 0x09, - 0x03, 0xec, 0xc1, 0xad, 0xc2, 0xa1, 0x6c, 0x66, 0x57, 0xb9, 0x61, 0x2f, 0xc1, 0xd8, 0x0f, 0x82, - 0x13, 0xe7, 0xe4, 0x75, 0x6d, 0xec, 0x8d, 0x13, 0xc0, 0x1e, 0xc3, 0x96, 0x00, 0x5d, 0x3c, 0xe0, - 0x3a, 0xac, 0x50, 0x9a, 0x72, 0x79, 0xcd, 0x2d, 0x96, 0xec, 0x15, 0xec, 0xb8, 0x34, 0x8a, 0x2f, - 0xe8, 0x7f, 0xc5, 0x9e, 0x40, 0x5f, 0x4c, 0x20, 0xd6, 0xdb, 0x85, 0x9d, 0xf2, 0x45, 0x85, 0x7a, - 0xec, 0xa7, 0x0c, 0x7d, 0x71, 0xbd, 0xa0, 0x7b, 0xd7, 0xbc, 0x8d, 0xee, 0x0c, 0x17, 0x9d, 0x12, - 0xe0, 0xec, 0xfa, 0x60, 0xe1, 0xee, 0xe6, 0x33, 0x80, 0xba, 0x34, 0x67, 0x8d, 0xc6, 0xad, 0x41, - 0x50, 0xc7, 0xde, 0x88, 0xb8, 0x33, 0x9a, 0xcb, 0xd7, 0xce, 0x2f, 0x15, 0xd6, 0x4e, 0x29, 0xbd, - 0x08, 0x7d, 0xc2, 0x63, 0xb8, 0xb3, 0x30, 0x54, 0x78, 0xaf, 0xba, 0x89, 0x68, 0x72, 0x4d, 0xb3, - 0xa5, 0x9a, 0x44, 0x97, 0x4c, 0xc2, 0xf7, 0xb0, 0xbe, 0x3c, 0x40, 0x78, 0xbf, 0x42, 0xb4, 0x4c, - 0xa3, 0xb9, 0xdb, 0xde, 0x50, 0xb2, 0x7e, 0x80, 0x8d, 0xe5, 0xfc, 0x66, 0x35, 0x6d, 0xcb, 0x00, - 0xd6, 0xb4, 0xc2, 0xec, 0x33, 0x09, 0x3f, 0xc2, 0x46, 0x23, 0x60, 0x38, 0xa8, 0x50, 0x6d, 0xc9, - 0x35, 0xad, 0x8e, 0x8e, 0x92, 0xf8, 0x13, 0xf4, 0x44, 0x61, 0xc2, 0x87, 0x15, 0xb2, 0x23, 0xab, - 0xe6, 0x83, 0xee, 0xa6, 0x6b, 0x05, 0x51, 0x4e, 0x6a, 0x85, 0x8e, 0x74, 0xd6, 0x0a, 0xad, 0x51, - 0x63, 0xd2, 0x81, 0xf1, 0x7b, 0x62, 0xc9, 0x57, 0x13, 0x4b, 0xfe, 0x3b, 0xb1, 0xe4, 0xef, 0x53, - 0x4b, 0xba, 0x9a, 0x5a, 0xd2, 0x9f, 0xa9, 0x25, 0x9d, 0xad, 0xf2, 0x5f, 0xfc, 0x8b, 0x7f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xb7, 0xed, 0x74, 0x34, 0xef, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ServiceClient is the client API for Service service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServiceClient interface { - // SetReplicator for this peer - SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) - AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) -} - -type serviceClient struct { - cc *grpc.ClientConn -} - -func NewServiceClient(cc *grpc.ClientConn) ServiceClient { - return &serviceClient{cc} -} - -func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { - out := new(SetReplicatorReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/SetReplicator", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +type GetAllP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { - out := new(DeleteReplicatorReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/DeleteReplicator", in, out, opts...) - if err != nil { - return nil, err +func (x *GetAllP2PCollectionsRequest) Reset() { + *x = GetAllP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { - out := new(GetAllReplicatorReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/GetAllReplicators", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *GetAllP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { - out := new(AddP2PCollectionsReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/AddP2PCollections", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*GetAllP2PCollectionsRequest) ProtoMessage() {} -func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { - out := new(RemoveP2PCollectionsReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/RemoveP2PCollections", in, out, opts...) - if err != nil { - return nil, err +func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { - out := new(GetAllP2PCollectionsReply) - err := c.cc.Invoke(ctx, "/api.pb.Service/GetAllP2PCollections", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. +func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{10} } -// ServiceServer is the server API for Service service. 
-type ServiceServer interface { - // SetReplicator for this peer - SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) - AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) -} +type GetAllP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// UnimplementedServiceServer can be embedded to have forward compatible implementations. -type UnimplementedServiceServer struct { + Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (*UnimplementedServiceServer) SetReplicator(ctx context.Context, req *SetReplicatorRequest) (*SetReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") -} -func (*UnimplementedServiceServer) DeleteReplicator(ctx context.Context, req *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") -} -func (*UnimplementedServiceServer) GetAllReplicators(ctx context.Context, req *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") -} -func (*UnimplementedServiceServer) AddP2PCollections(ctx context.Context, req *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") -} -func (*UnimplementedServiceServer) RemoveP2PCollections(ctx context.Context, req *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") -} -func (*UnimplementedServiceServer) GetAllP2PCollections(ctx context.Context, req *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") +func (x *GetAllP2PCollectionsReply) Reset() { + *x = GetAllP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func RegisterServiceServer(s *grpc.Server, srv ServiceServer) { - s.RegisterService(&_Service_serviceDesc, srv) +func (x *GetAllP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).SetReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.pb.Service/SetReplicator", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}

+func (*GetAllP2PCollectionsReply) ProtoMessage() {}

-func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteReplicatorRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).DeleteReplicator(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/DeleteReplicator",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest))
+func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message {
+	mi := &file_api_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
 	}
-	return interceptor(ctx, in, info, handler)
+	return mi.MessageOf(x)
 }

-func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetAllReplicatorRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).GetAllReplicators(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/GetAllReplicators",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead.
+func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) {
+	return file_api_proto_rawDescGZIP(), []int{11}
 }

-func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AddP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
+func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection {
+	if x != nil {
+		return x.Collections
 	}
-	if interceptor == nil {
-		return srv.(ServiceServer).AddP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/AddP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+	return nil
 }

-func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RemoveP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ServiceServer).RemoveP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/RemoveP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
+type GetAllReplicatorReply_Replicators struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"`
+	Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"`
 }

-func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetAllP2PCollectionsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
+func (x *GetAllReplicatorReply_Replicators) Reset() {
+	*x = GetAllReplicatorReply_Replicators{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_api_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
 	}
-	if interceptor == nil {
-		return srv.(ServiceServer).GetAllP2PCollections(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server: srv,
-		FullMethod: "/api.pb.Service/GetAllP2PCollections",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
 }

-var _Service_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "api.pb.Service",
-	HandlerType: (*ServiceServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "SetReplicator",
-			Handler: _Service_SetReplicator_Handler,
-		},
-		{
-			MethodName: "DeleteReplicator",
-			Handler: _Service_DeleteReplicator_Handler,
-		},
-		{
-			MethodName: "GetAllReplicators",
-			Handler: _Service_GetAllReplicators_Handler,
-		},
-		{
-			MethodName: "AddP2PCollections",
-			Handler: _Service_AddP2PCollections_Handler,
-		},
-		{
-			MethodName: "RemoveP2PCollections",
-			Handler: _Service_RemoveP2PCollections_Handler,
-		},
-		{
-			MethodName: "GetAllP2PCollections",
-
Handler: _Service_GetAllP2PCollections_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", +func (x *GetAllReplicatorReply_Replicators) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetReplicatorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} + +func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *SetReplicatorRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{5, 0} } -func (m *SetReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarintApi(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x12 +func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { + if x != nil { + return x.Info } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + return nil } -func (m *SetReplicatorReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { + if x != nil { + return x.Schemas } - return dAtA[:n], nil + return nil } -func (m *SetReplicatorReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type GetAllReplicatorReply_Replicators_Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SetReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` } -func (m *DeleteReplicatorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply_Replicators_Info) Reset() { + *x = GetAllReplicatorReply_Replicators_Info{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *DeleteReplicatorRequest) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorReply_Replicators_Info) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} -func (m *DeleteReplicatorReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *DeleteReplicatorReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{5, 0, 0} } -func (m *DeleteReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarintApi(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa +func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { + if x != nil { + return x.Id } - return len(dAtA) - i, nil + return nil } -func (m *GetAllReplicatorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { + if x != nil { + return x.Addrs } - return dAtA[:n], nil + return nil } -func (m *GetAllReplicatorRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type GetAllP2PCollectionsReply_Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetAllReplicatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } -func (m *GetAllReplicatorReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllReplicatorReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Replicators) > 0 { - for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Replicators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func (x *GetAllP2PCollectionsReply_Collection) Reset() { + *x = GetAllP2PCollectionsReply_Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_api_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *GetAllReplicatorReply_Replicators) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Schemas) > 0 { - for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Schemas[iNdEx]) - copy(dAtA[i:], m.Schemas[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.Schemas[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +func (x *GetAllP2PCollectionsReply_Collection) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetAllReplicatorReply_Replicators_Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} + +func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. 
+func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { + return file_api_proto_rawDescGZIP(), []int{11, 0} } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addrs) > 0 { - i -= len(m.Addrs) - copy(dAtA[i:], m.Addrs) - i = encodeVarintApi(dAtA, i, uint64(len(m.Addrs))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa +func (x *GetAllP2PCollectionsReply_Collection) GetId() string { + if x != nil { + return x.Id } - return len(dAtA) - i, nil + return "" } -func (m *AddP2PCollectionsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } +func (x *GetAllP2PCollectionsReply_Collection) GetName() string { + if x != nil { + return x.Name } - return len(dAtA) - i, nil + return "" } -func (m *AddP2PCollectionsReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarintApi(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarintApi(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarintApi(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Collections[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintApi(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply_Collection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SetReplicatorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *SetReplicatorReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *DeleteReplicatorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *DeleteReplicatorReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - 
return n -} - -func (m *GetAllReplicatorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *GetAllReplicatorReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Replicators) > 0 { - for _, e := range m.Replicators { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *GetAllReplicatorReply_Replicators) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovApi(uint64(l)) - } - if len(m.Schemas) > 0 { - for _, s := range m.Schemas { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *GetAllReplicatorReply_Replicators_Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Addrs) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *AddP2PCollectionsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *AddP2PCollectionsReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *RemoveP2PCollectionsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *RemoveP2PCollectionsReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func (m *GetAllP2PCollectionsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *GetAllP2PCollectionsReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, e := range m.Collections { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *GetAllP2PCollectionsReply_Collection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n -} - -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *SetReplicatorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicatorReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) - if err := m.Replicators[len(m.Replicators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Replicators: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Replicators: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &GetAllReplicatorReply_Replicators_Info{} - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators_Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) - if m.Addrs == nil { - m.Addrs = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply_Collection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Collection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Collection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - 
iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_api_proto protoreflect.FileDescriptor + +var file_api_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61, 0x70, 0x69, + 0x2e, 0x70, 0x62, 0x22, 0x4c, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, + 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, + 0x31, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, + 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, + 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 
0x0a, 0x05, 0x61, + 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x04, 0x0a, 0x07, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, + 0x62, 
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1f, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x41, + 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, + 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, + 0x08, 0x2f, 0x3b, 0x61, 0x70, 0x69, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") + file_api_proto_rawDescOnce sync.Once + file_api_proto_rawDescData = file_api_proto_rawDesc ) + +func file_api_proto_rawDescGZIP() []byte { + file_api_proto_rawDescOnce.Do(func() { + file_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_proto_rawDescData) + }) + return file_api_proto_rawDescData +} + +var file_api_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_api_proto_goTypes = []interface{}{ + (*SetReplicatorRequest)(nil), // 0: api.pb.SetReplicatorRequest + (*SetReplicatorReply)(nil), // 1: api.pb.SetReplicatorReply + (*DeleteReplicatorRequest)(nil), // 2: api.pb.DeleteReplicatorRequest + (*DeleteReplicatorReply)(nil), // 3: api.pb.DeleteReplicatorReply + (*GetAllReplicatorRequest)(nil), // 4: api.pb.GetAllReplicatorRequest + 
(*GetAllReplicatorReply)(nil), // 5: api.pb.GetAllReplicatorReply + (*AddP2PCollectionsRequest)(nil), // 6: api.pb.AddP2PCollectionsRequest + (*AddP2PCollectionsReply)(nil), // 7: api.pb.AddP2PCollectionsReply + (*RemoveP2PCollectionsRequest)(nil), // 8: api.pb.RemoveP2PCollectionsRequest + (*RemoveP2PCollectionsReply)(nil), // 9: api.pb.RemoveP2PCollectionsReply + (*GetAllP2PCollectionsRequest)(nil), // 10: api.pb.GetAllP2PCollectionsRequest + (*GetAllP2PCollectionsReply)(nil), // 11: api.pb.GetAllP2PCollectionsReply + (*GetAllReplicatorReply_Replicators)(nil), // 12: api.pb.GetAllReplicatorReply.Replicators + (*GetAllReplicatorReply_Replicators_Info)(nil), // 13: api.pb.GetAllReplicatorReply.Replicators.Info + (*GetAllP2PCollectionsReply_Collection)(nil), // 14: api.pb.GetAllP2PCollectionsReply.Collection +} +var file_api_proto_depIdxs = []int32{ + 12, // 0: api.pb.GetAllReplicatorReply.replicators:type_name -> api.pb.GetAllReplicatorReply.Replicators + 14, // 1: api.pb.GetAllP2PCollectionsReply.collections:type_name -> api.pb.GetAllP2PCollectionsReply.Collection + 13, // 2: api.pb.GetAllReplicatorReply.Replicators.info:type_name -> api.pb.GetAllReplicatorReply.Replicators.Info + 0, // 3: api.pb.Service.SetReplicator:input_type -> api.pb.SetReplicatorRequest + 2, // 4: api.pb.Service.DeleteReplicator:input_type -> api.pb.DeleteReplicatorRequest + 4, // 5: api.pb.Service.GetAllReplicators:input_type -> api.pb.GetAllReplicatorRequest + 6, // 6: api.pb.Service.AddP2PCollections:input_type -> api.pb.AddP2PCollectionsRequest + 8, // 7: api.pb.Service.RemoveP2PCollections:input_type -> api.pb.RemoveP2PCollectionsRequest + 10, // 8: api.pb.Service.GetAllP2PCollections:input_type -> api.pb.GetAllP2PCollectionsRequest + 1, // 9: api.pb.Service.SetReplicator:output_type -> api.pb.SetReplicatorReply + 3, // 10: api.pb.Service.DeleteReplicator:output_type -> api.pb.DeleteReplicatorReply + 5, // 11: api.pb.Service.GetAllReplicators:output_type -> api.pb.GetAllReplicatorReply + 7, // 12: api.pb.Service.AddP2PCollections:output_type -> api.pb.AddP2PCollectionsReply + 9, // 13: api.pb.Service.RemoveP2PCollections:output_type -> api.pb.RemoveP2PCollectionsReply + 11, // 14: api.pb.Service.GetAllP2PCollections:output_type -> api.pb.GetAllP2PCollectionsReply + 9, // [9:15] is the sub-list for method output_type + 3, // [3:9] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_api_proto_init() } +func file_api_proto_init() { + if File_api_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*DeleteReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply_Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_proto_goTypes, + DependencyIndexes: file_api_proto_depIdxs, + MessageInfos: file_api_proto_msgTypes, + }.Build() + File_api_proto = out.File + file_api_proto_rawDesc = nil + file_api_proto_goTypes = nil + file_api_proto_depIdxs = nil +} diff 
--git a/net/api/pb/api.proto b/net/api/pb/api.proto index df86e31931..367997c7af 100644 --- a/net/api/pb/api.proto +++ b/net/api/pb/api.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package api.pb; +option go_package = "/;api_pb"; + message SetReplicatorRequest { repeated string collections = 1; bytes addr = 2; diff --git a/net/api/pb/api_grpc.pb.go b/net/api/pb/api_grpc.pb.go new file mode 100644 index 0000000000..5d1bc204d3 --- /dev/null +++ b/net/api/pb/api_grpc.pb.go @@ -0,0 +1,300 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.9 +// source: api.proto + +package api_pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Service_SetReplicator_FullMethodName = "/api.pb.Service/SetReplicator" + Service_DeleteReplicator_FullMethodName = "/api.pb.Service/DeleteReplicator" + Service_GetAllReplicators_FullMethodName = "/api.pb.Service/GetAllReplicators" + Service_AddP2PCollections_FullMethodName = "/api.pb.Service/AddP2PCollections" + Service_RemoveP2PCollections_FullMethodName = "/api.pb.Service/RemoveP2PCollections" + Service_GetAllP2PCollections_FullMethodName = "/api.pb.Service/GetAllP2PCollections" +) + +// ServiceClient is the client API for Service service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServiceClient interface { + // SetReplicator for this peer + SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) + // DeleteReplicator for this peer + GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) + AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) +} + +type serviceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient { + return &serviceClient{cc} +} + +func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { + out := new(SetReplicatorReply) + err := c.cc.Invoke(ctx, Service_SetReplicator_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { + out := new(DeleteReplicatorReply) + err := c.cc.Invoke(ctx, Service_DeleteReplicator_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { + out := new(GetAllReplicatorReply) + err := c.cc.Invoke(ctx, Service_GetAllReplicators_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { + out := new(AddP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_AddP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { + out := new(RemoveP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_RemoveP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { + out := new(GetAllP2PCollectionsReply) + err := c.cc.Invoke(ctx, Service_GetAllP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceServer is the server API for Service service. +// All implementations must embed UnimplementedServiceServer +// for forward compatibility +type ServiceServer interface { + // SetReplicator for this peer + SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) + // DeleteReplicator for this peer + GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) + AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) + mustEmbedUnimplementedServiceServer() +} + +// UnimplementedServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedServiceServer struct { +} + +func (UnimplementedServiceServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") +} +func (UnimplementedServiceServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") +} +func (UnimplementedServiceServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") +} +func (UnimplementedServiceServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") +} +func (UnimplementedServiceServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") +} +func (UnimplementedServiceServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") +} +func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {} + +// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServiceServer will +// result in compilation errors. +type UnsafeServiceServer interface { + mustEmbedUnimplementedServiceServer() +} + +func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { + s.RegisterService(&Service_ServiceDesc, srv) +} + +func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).SetReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_SetReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).DeleteReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_DeleteReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ServiceServer).GetAllReplicators(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetAllReplicators_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).AddP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_AddP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).RemoveP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_RemoveP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetAllP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetAllP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Service_ServiceDesc is the grpc.ServiceDesc for Service service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Service_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "api.pb.Service", + HandlerType: (*ServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetReplicator", + Handler: _Service_SetReplicator_Handler, + }, + { + MethodName: "DeleteReplicator", + Handler: _Service_DeleteReplicator_Handler, + }, + { + MethodName: "GetAllReplicators", + Handler: _Service_GetAllReplicators_Handler, + }, + { + MethodName: "AddP2PCollections", + Handler: _Service_AddP2PCollections_Handler, + }, + { + MethodName: "RemoveP2PCollections", + Handler: _Service_RemoveP2PCollections_Handler, + }, + { + MethodName: "GetAllP2PCollections", + Handler: _Service_GetAllP2PCollections_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} diff --git a/net/api/pb/api_vtproto.pb.go b/net/api/pb/api_vtproto.pb.go new file mode 100644 index 0000000000..e4ddfb9bcb --- /dev/null +++ b/net/api/pb/api_vtproto.pb.go @@ -0,0 +1,2316 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: api.proto + +package api_pb + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addr) > 0 { + i -= len(m.Addr) + copy(dAtA[i:], m.Addr) + i = encodeVarint(dAtA, i, uint64(len(m.Addr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addrs) > 0 { + i -= len(m.Addrs) + copy(dAtA[i:], m.Addrs) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Schemas) > 0 { + for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Schemas[iNdEx]) + copy(dAtA[i:], m.Schemas[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Info != nil { + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Replicators) > 0 { + for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m 
*GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SetReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Addrs) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Schemas) > 0 { + for _, s := range m.Schemas { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + 
return n +} + +func (m *GetAllReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Replicators) > 0 { + for _, e := range m.Replicators { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, e := range m.Collections { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) + if m.Addr == nil { + m.Addr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Id == nil { + m.Id = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) + if m.Addrs == nil { + m.Addrs = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &GetAllReplicatorReply_Replicators_Info{} + } + if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) + if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) + if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/net/api/service.go b/net/api/service.go deleted file mode 100644 index 2873eab65b..0000000000 --- a/net/api/service.go +++ /dev/null @@ -1,158 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "context" - - libpeer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" - pb "github.com/sourcenetwork/defradb/net/api/pb" -) - -var ( - log = logging.MustNewLogger("netapi") -) - -type Service struct { - peer *net.Peer -} - -func NewService(peer *net.Peer) *Service { - return &Service{peer: peer} -} - -func (s *Service) SetReplicator( - ctx context.Context, - req *pb.SetReplicatorRequest, -) (*pb.SetReplicatorReply, error) { - log.Debug(ctx, "Received SetReplicator request") - - addr, err := ma.NewMultiaddrBytes(req.Addr) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - pid, err := s.peer.SetReplicator(ctx, addr, req.Collections...) 
- if err != nil { - return nil, err - } - return &pb.SetReplicatorReply{ - PeerID: marshalPeerID(pid), - }, nil -} - -func (s *Service) DeleteReplicator( - ctx context.Context, - req *pb.DeleteReplicatorRequest, -) (*pb.DeleteReplicatorReply, error) { - log.Debug(ctx, "Received DeleteReplicator request") - err := s.peer.DeleteReplicator(ctx, libpeer.ID(req.PeerID)) - if err != nil { - return nil, err - } - return &pb.DeleteReplicatorReply{ - PeerID: req.PeerID, - }, nil -} - -func (s *Service) GetAllReplicators( - ctx context.Context, - req *pb.GetAllReplicatorRequest, -) (*pb.GetAllReplicatorReply, error) { - log.Debug(ctx, "Received GetAllReplicators request") - - reps, err := s.peer.GetAllReplicators(ctx) - if err != nil { - return nil, err - } - - pbReps := []*pb.GetAllReplicatorReply_Replicators{} - for _, rep := range reps { - pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ - Info: &pb.GetAllReplicatorReply_Replicators_Info{ - Id: []byte(rep.Info.ID), - Addrs: rep.Info.Addrs[0].Bytes(), - }, - Schemas: rep.Schemas, - }) - } - - return &pb.GetAllReplicatorReply{ - Replicators: pbReps, - }, nil -} - -func marshalPeerID(id libpeer.ID) []byte { - b, _ := id.Marshal() // This will never return an error - return b -} - -// RemoveP2PCollections handles the request to add P2P collecctions to the stored list. -func (s *Service) AddP2PCollections( - ctx context.Context, - req *pb.AddP2PCollectionsRequest, -) (*pb.AddP2PCollectionsReply, error) { - log.Debug(ctx, "Received AddP2PCollections request") - - err := s.peer.AddP2PCollections(req.Collections) - if err != nil { - return nil, err - } - - return &pb.AddP2PCollectionsReply{}, nil -} - -// RemoveP2PCollections handles the request to remove P2P collecctions from the stored list. -func (s *Service) RemoveP2PCollections( - ctx context.Context, - req *pb.RemoveP2PCollectionsRequest, -) (*pb.RemoveP2PCollectionsReply, error) { - log.Debug(ctx, "Received RemoveP2PCollections request") - - err := s.peer.RemoveP2PCollections(req.Collections) - if err != nil { - return nil, err - } - - return &pb.RemoveP2PCollectionsReply{}, nil -} - -// GetAllP2PCollections handles the request to get all P2P collecctions from the stored list. 
-func (s *Service) GetAllP2PCollections( - ctx context.Context, - req *pb.GetAllP2PCollectionsRequest, -) (*pb.GetAllP2PCollectionsReply, error) { - log.Debug(ctx, "Received GetAllP2PCollections request") - collections, err := s.peer.GetAllP2PCollections() - if err != nil { - return nil, err - } - - var pbCols []*pb.GetAllP2PCollectionsReply_Collection - for _, col := range collections { - pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{ - Id: col.ID, - Name: col.Name, - }) - } - - return &pb.GetAllP2PCollectionsReply{ - Collections: pbCols, - }, nil -} diff --git a/net/client.go b/net/client.go index 2cbf246441..e38df0ed54 100644 --- a/net/client.go +++ b/net/client.go @@ -14,12 +14,10 @@ package net import ( "context" - "fmt" "time" "github.com/libp2p/go-libp2p/core/peer" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/logging" @@ -35,20 +33,16 @@ var ( // pushLog creates a pushLog request and sends it to another node // over libp2p grpc connection func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) error { - dockey, err := client.NewDocKeyFromString(evt.DocKey) - if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) - } log.Debug( ctx, "Preparing pushLog request", - logging.NewKV("DocKey", dockey), + logging.NewKV("DocKey", evt.DocKey), logging.NewKV("CID", evt.Cid), logging.NewKV("SchemaId", evt.SchemaID)) body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: evt.Cid}, + DocKey: []byte(evt.DocKey), + Cid: evt.Cid.Bytes(), SchemaID: []byte(evt.SchemaID), Creator: s.peer.host.ID().String(), Log: &pb.Document_Log{ @@ -61,20 +55,26 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er log.Debug( ctx, "Pushing log", - logging.NewKV("DocKey", dockey), + logging.NewKV("DocKey", evt.DocKey), logging.NewKV("CID", evt.Cid), - logging.NewKV("PeerID", pid)) + logging.NewKV("PeerID", pid), + ) - client, err := s.dial(pid) // grpc dial over p2p stream + client, err := s.dial(pid) // grpc dial over P2P stream if err != nil { - return errors.Wrap("failed to push log", err) + return NewErrPushLog(err) } cctx, cancel := context.WithTimeout(ctx, PushTimeout) defer cancel() if _, err := client.PushLog(cctx, req); err != nil { - return errors.Wrap(fmt.Sprintf("Failed PushLog RPC request %s for %s to %s", evt.Cid, dockey, pid), err) + return NewErrPushLog( + err, + errors.NewKV("CID", evt.Cid), + errors.NewKV("DocKey", evt.DocKey), + errors.NewKV("PeerID", pid), + ) } return nil } diff --git a/net/client_test.go b/net/client_test.go new file mode 100644 index 0000000000..e28c543175 --- /dev/null +++ b/net/client_test.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
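The pushLog change above sends the DocKey and CID as raw bytes and wraps RPC failures with key/value context via NewErrPushLog, with the call bounded by PushTimeout. As a rough illustrative sketch of that call shape only (the pushLogClient interface and pushWithTimeout helper below are stand-ins, not the generated gRPC client):

package main

import (
	"context"
	"fmt"
	"time"
)

// pushLogClient stands in for the gRPC client dialed over the P2P stream;
// only the call shape matters for this sketch.
type pushLogClient interface {
	PushLog(ctx context.Context, payload []byte) error
}

// pushWithTimeout bounds the RPC with a deadline and decorates failures with
// identifying keys, mirroring the NewErrPushLog(err, errors.NewKV(...)) pattern.
func pushWithTimeout(ctx context.Context, c pushLogClient, payload []byte, docKey, peerID string) error {
	cctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if err := c.PushLog(cctx, payload); err != nil {
		return fmt.Errorf("failed to push log (DocKey: %s, PeerID: %s): %w", docKey, peerID, err)
	}
	return nil
}

type failingClient struct{}

func (failingClient) PushLog(ctx context.Context, payload []byte) error {
	return fmt.Errorf("no transport security set")
}

func main() {
	// Usage: a failing client surfaces the wrapped error with its keys attached.
	err := pushWithTimeout(context.Background(), failingClient{}, []byte("log"), "doc-key", "peer-id")
	fmt.Println(err)
}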
+ +package net + +import ( + "context" + "testing" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/events" +) + +func TestPushlogWithDialFailure(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + n.server.opts = append( + n.server.opts, + grpc.WithTransportCredentials(nil), + grpc.WithCredentialsBundle(nil), + ) + + err = n.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: "test", + Block: &EmptyNode{}, + Priority: 1, + }, peer.ID("some-peer-id")) + require.Contains(t, err.Error(), "no transport security set") +} + +func TestPushlogWithInvalidPeerID(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + doc, err := client.NewDocFromJSON([]byte(`{"test": "test"}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: "test", + Block: &EmptyNode{}, + Priority: 1, + }, peer.ID("some-peer-id")) + require.Contains(t, err.Error(), "failed to parse peer ID") +} + +func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { + ctx := context.Background() + _, n1 := newTestNode(ctx, t) + n1.Start() + _, n2 := newTestNode(ctx, t) + n2.Start() + + err := n1.host.Connect(ctx, peer.AddrInfo{ + ID: n2.PeerID(), + Addrs: []ma.Multiaddr{ + n2.host.Addrs()[0], + }, + }) + require.NoError(t, err) + + _, err = n1.db.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + _, err = n2.db.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "test"}`)) + require.NoError(t, err) + + col, err := n1.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + err = col.Save(ctx, doc) + require.NoError(t, err) + + col, err = n2.db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + err = col.Save(ctx, doc) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n1.server.pushLog(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: cid, + SchemaID: col.SchemaID(), + Block: &EmptyNode{}, + Priority: 1, + }, n2.PeerID()) + require.NoError(t, err) +} diff --git a/node/config.go b/net/config.go similarity index 75% rename from node/config.go rename to net/config.go index f660a4a8b5..28fd73f25e 100644 --- a/node/config.go +++ b/net/config.go @@ -10,7 +10,7 @@ /* Node configuration, in which NodeOpt functions are applied on Options. */ -package node +package net import ( "time" @@ -19,6 +19,8 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/connmgr" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" + + "github.com/sourcenetwork/defradb/config" ) // Options is the node options. @@ -58,8 +60,31 @@ func NewConnManager(low int, high int, grace time.Duration) (cconnmgr.ConnManage return c, nil } +// WithConfig provides the Node-specific configuration, from the top-level Net config. 
+func WithConfig(cfg *config.Config) NodeOpt { + return func(opt *Options) error { + var err error + err = WithListenP2PAddrStrings(cfg.Net.P2PAddress)(opt) + if err != nil { + return err + } + err = WithListenTCPAddrString(cfg.Net.TCPAddress)(opt) + if err != nil { + return err + } + opt.EnableRelay = cfg.Net.RelayEnabled + opt.EnablePubSub = cfg.Net.PubSubEnabled + opt.DataPath = cfg.Datastore.Badger.Path + opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) + if err != nil { + return err + } + return nil + } +} + // DataPath sets the data path. -func DataPath(path string) NodeOpt { +func WithDataPath(path string) NodeOpt { return func(opt *Options) error { opt.DataPath = path return nil @@ -83,7 +108,7 @@ func WithEnableRelay(enable bool) NodeOpt { } // ListenP2PAddrStrings sets the address to listen on given as strings. -func ListenP2PAddrStrings(addrs ...string) NodeOpt { +func WithListenP2PAddrStrings(addrs ...string) NodeOpt { return func(opt *Options) error { for _, addrstr := range addrs { a, err := ma.NewMultiaddr(addrstr) @@ -97,7 +122,7 @@ func ListenP2PAddrStrings(addrs ...string) NodeOpt { } // ListenTCPAddrString sets the TCP address to listen on, as Multiaddr. -func ListenTCPAddrString(addr string) NodeOpt { +func WithListenTCPAddrString(addr string) NodeOpt { return func(opt *Options) error { a, err := ma.NewMultiaddr(addr) if err != nil { @@ -109,7 +134,7 @@ func ListenTCPAddrString(addr string) NodeOpt { } // ListenAddrs sets the address to listen on given as MultiAddr(s). -func ListenAddrs(addrs ...ma.Multiaddr) NodeOpt { +func WithListenAddrs(addrs ...ma.Multiaddr) NodeOpt { return func(opt *Options) error { opt.ListenAddrs = addrs return nil diff --git a/net/config_test.go b/net/config_test.go new file mode 100644 index 0000000000..bffc19aead --- /dev/null +++ b/net/config_test.go @@ -0,0 +1,127 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
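The renamed With* helpers in net/config.go follow the functional-options pattern: each NodeOpt is a func(*Options) error, and a constructor folds the options over a zero-valued Options, which is what NewMergedOptions exercises in the tests below. A minimal self-contained sketch of the same pattern (newOptions here is a hypothetical stand-in for the real constructor):

package main

import "fmt"

type Options struct {
	DataPath     string
	EnablePubSub bool
}

// NodeOpt mutates Options and may fail, e.g. on an unparsable address.
type NodeOpt func(*Options) error

func WithDataPath(path string) NodeOpt {
	return func(o *Options) error {
		o.DataPath = path
		return nil
	}
}

func WithPubSub(enable bool) NodeOpt {
	return func(o *Options) error {
		o.EnablePubSub = enable
		return nil
	}
}

// newOptions applies every option in order, skipping nil options and
// stopping at the first error.
func newOptions(opts ...NodeOpt) (*Options, error) {
	o := &Options{}
	for _, apply := range opts {
		if apply == nil {
			continue
		}
		if err := apply(o); err != nil {
			return nil, err
		}
	}
	return o, nil
}

func main() {
	o, err := newOptions(WithDataPath("test/path"), WithPubSub(true))
	if err != nil {
		panic(err)
	}
	fmt.Println(o.DataPath, o.EnablePubSub)
}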
+ +package net + +import ( + "testing" + "time" + + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/config" +) + +func TestNewMergedOptionsSimple(t *testing.T) { + opt, err := NewMergedOptions() + require.NoError(t, err) + require.NotNil(t, opt) +} + +func TestNewMergedOptionsWithNilOption(t *testing.T) { + opt, err := NewMergedOptions(nil) + require.NoError(t, err) + require.NotNil(t, opt) +} + +func TestNewConnManagerSimple(t *testing.T) { + conMngr, err := NewConnManager(1, 10, time.Second) + require.NoError(t, err) + err = conMngr.Close() + require.NoError(t, err) +} + +func TestNewConnManagerWithError(t *testing.T) { + _, err := NewConnManager(1, 10, -time.Second) + require.Contains(t, err.Error(), "grace period must be non-negative") +} + +func TestWithConfigWithP2PAddressError(t *testing.T) { + cfg := config.Config{ + Net: &config.NetConfig{ + P2PAddress: "/willerror/0.0.0.0/tcp/9999", + }, + } + err := WithConfig(&cfg)(&Options{}) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithConfigWitTCPAddressError(t *testing.T) { + cfg := config.Config{ + Net: &config.NetConfig{ + P2PAddress: "/ip4/0.0.0.0/tcp/9999", + TCPAddress: "/willerror/0.0.0.0/tcp/9999", + }, + } + err := WithConfig(&cfg)(&Options{}) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithDataPath(t *testing.T) { + path := "test/path" + opt, err := NewMergedOptions(WithDataPath(path)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, path, opt.DataPath) +} + +func TestWithPubSub(t *testing.T) { + opt, err := NewMergedOptions(WithPubSub(true)) + require.NoError(t, err) + require.NotNil(t, opt) + require.True(t, opt.EnablePubSub) +} + +func TestWithEnableRelay(t *testing.T) { + opt, err := NewMergedOptions(WithEnableRelay(true)) + require.NoError(t, err) + require.NotNil(t, opt) + require.True(t, opt.EnableRelay) +} + +func TestWithListenP2PAddrStringsWithError(t *testing.T) { + addr := "/willerror/0.0.0.0/tcp/9999" + _, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithListenP2PAddrStrings(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + opt, err := NewMergedOptions(WithListenP2PAddrStrings(addr)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.ListenAddrs[0].String()) +} + +func TestWithListenTCPAddrStringWithError(t *testing.T) { + addr := "/willerror/0.0.0.0/tcp/9999" + _, err := NewMergedOptions(WithListenTCPAddrString(addr)) + require.Contains(t, err.Error(), "failed to parse multiaddr") +} + +func TestWithListenTCPAddrString(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + opt, err := NewMergedOptions(WithListenTCPAddrString(addr)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.TCPAddr.String()) +} + +func TestWithListenAddrs(t *testing.T) { + addr := "/ip4/0.0.0.0/tcp/9999" + a, err := ma.NewMultiaddr(addr) + require.NoError(t, err) + + opt, err := NewMergedOptions(WithListenAddrs(a)) + require.NoError(t, err) + require.NotNil(t, opt) + require.Equal(t, addr, opt.ListenAddrs[0].String()) +} diff --git a/net/dag.go b/net/dag.go index 1fedd9301e..d814630f6a 100644 --- a/net/dag.go +++ b/net/dag.go @@ -30,7 +30,7 @@ var ( DAGSyncTimeout = time.Second * 60 ) -// A DAGSyncer is an abstraction to an IPLD-based p2p storage layer. A +// A DAGSyncer is an abstraction to an IPLD-based P2P storage layer. 
A // DAGSyncer is a DAGService with the ability to publish new ipld nodes to the // network, and retrieving others from it. type DAGSyncer interface { @@ -55,7 +55,7 @@ type dagJob struct { node ipld.Node // the current ipld Node collection client.Collection // collection our document belongs to - dockey core.DataStoreKey // dockey of our document + dsKey core.DataStoreKey // datastore key of our document fieldName string // field of the subgraph our node belongs to // Transaction common to a pushlog event. It is used to pass it along to processLog @@ -87,12 +87,13 @@ func (p *Peer) sendJobWorker() { return case newJob := <-p.sendJobs: - jobs, ok := docWorkerQueue[newJob.dockey.DocKey] + jobs, ok := docWorkerQueue[newJob.dsKey.DocKey] if !ok { jobs = make(chan *dagJob, numWorkers) for i := 0; i < numWorkers; i++ { go p.dagWorker(jobs) } + docWorkerQueue[newJob.dsKey.DocKey] = jobs } jobs <- newJob @@ -112,7 +113,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { log.Debug( p.ctx, "Starting new job from DAG queue", - logging.NewKV("DocKey", job.dockey), + logging.NewKV("Datastore Key", job.dsKey), logging.NewKV("CID", job.node.Cid()), ) @@ -128,7 +129,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { p.ctx, job.txn, job.collection, - job.dockey, + job.dsKey, job.node.Cid(), job.fieldName, job.node, @@ -140,7 +141,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { p.ctx, "Error processing log", err, - logging.NewKV("DocKey", job.dockey), + logging.NewKV("Datastore key", job.dsKey), logging.NewKV("CID", job.node.Cid()), ) job.session.Done() @@ -157,7 +158,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { j.session, j.txn, j.collection, - j.dockey, + j.dsKey, j.fieldName, j.node, children, diff --git a/net/dag_test.go b/net/dag_test.go new file mode 100644 index 0000000000..d0e9a18ce7 --- /dev/null +++ b/net/dag_test.go @@ -0,0 +1,398 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
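The sendJobWorker hunk above also fixes the per-document dispatch: the freshly created job channel is now stored back into docWorkerQueue, so later jobs for the same DocKey reuse the existing workers instead of spawning a new channel and worker set each time. A rough standalone sketch of that dispatch shape (hypothetical job/dispatch names, not the patch's types):

package main

import (
	"fmt"
	"sync"
)

type job struct {
	docKey  string
	payload string
}

// dispatch lazily creates one buffered queue (and its workers) per document key,
// so jobs for the same document are always handled by the same worker set.
func dispatch(jobs <-chan job, workersPerDoc int, wg *sync.WaitGroup) {
	queues := map[string]chan job{}
	for j := range jobs {
		q, ok := queues[j.docKey]
		if !ok {
			q = make(chan job, workersPerDoc)
			for i := 0; i < workersPerDoc; i++ {
				go func(q <-chan job) {
					for j := range q {
						fmt.Println("processing", j.docKey, j.payload)
						wg.Done()
					}
				}(q)
			}
			// Remember the per-document queue; this is the map write the patch adds.
			queues[j.docKey] = q
		}
		q <- j
	}
	for _, q := range queues {
		close(q)
	}
}

func main() {
	var wg sync.WaitGroup
	in := make(chan job, 4)
	wg.Add(2)
	in <- job{docKey: "doc-1", payload: "a"}
	in <- job{docKey: "doc-1", payload: "b"}
	close(in)
	dispatch(in, 2, &wg)
	wg.Wait()
}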
+ +package net + +import ( + "context" + "sync" + "testing" + "time" + + dag "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/go-cid" + format "github.com/ipfs/go-ipld-format" + ipld "github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/core/crdt" + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +const timeout = 5 * time.Second + +func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + err := n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithNewJobWithClosePriorToProcessing_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
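+	// The job is consumed asynchronously by the dagWorker goroutines, so a
+	// short pause is needed before closing the node.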
+ time.Sleep(100 * time.Microsecond) + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + done := make(chan struct{}) + go func() { + n.sendJobWorker() + close(done) + }() + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + txn, err := db.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + n.sendJobs <- &dagJob{ + session: &wg, + node: &EmptyNode{}, + collection: col, + dsKey: dsKey, + txn: txn, + } + + n.closeJob <- dsKey.DocKey + + err = n.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { + ctx := context.Background() + db1, n1 := newTestNode(ctx, t) + db2, n2 := newTestNode(ctx, t) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + require.NoError(t, err) + n2.Boostrap(addrs) + + done := make(chan struct{}) + go func() { + n2.sendJobWorker() + close(done) + }() + + _, err = db1.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + _, err = db2.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db1.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + txn, err := db2.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) + if sessionMaker, ok := getter.(SessionDAGSyncer); ok { + log.Debug(ctx, "Upgrading DAGSyncer with a session") + getter = sessionMaker.Session(ctx) + } + + n2.sendJobs <- &dagJob{ + session: &wg, + nodeGetter: getter, + node: node, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
+ time.Sleep(100 * time.Microsecond) + err = n1.Close() + require.NoError(t, err) + err = n2.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) { + ctx := context.Background() + db1, n1 := newTestNode(ctx, t) + db2, n2 := newTestNode(ctx, t) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + require.NoError(t, err) + n2.Boostrap(addrs) + + done := make(chan struct{}) + go func() { + n2.sendJobWorker() + close(done) + }() + + _, err = db1.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + _, err = db2.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db1.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + dsKey := core.DataStoreKeyFromDocKey(doc.Key()) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + txn, err := db2.NewTxn(ctx, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + wg.Add(1) + + links := []core.DAGLink{} + for k := range doc.Fields() { + delta := &crdt.LWWRegDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + FieldName: k, + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + links = append(links, core.DAGLink{ + Name: k, + Cid: node.Cid(), + }) + } + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + SubDAGs: links, + } + + node, err := makeNode(delta, []cid.Cid{}) + require.NoError(t, err) + + var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) + if sessionMaker, ok := getter.(SessionDAGSyncer); ok { + log.Debug(ctx, "Upgrading DAGSyncer with a session") + getter = sessionMaker.Session(ctx) + } + + n2.sendJobs <- &dagJob{ + session: &wg, + nodeGetter: getter, + node: node, + collection: col, + dsKey: dsKey, + txn: txn, + } + // Give the jobworker time to process the job. 
+ time.Sleep(100 * time.Microsecond) + err = n1.Close() + require.NoError(t, err) + err = n2.Close() + require.NoError(t, err) + select { + case <-done: + case <-time.After(timeout): + t.Error("failed to close sendJobWorker") + } +} + +func makeNode(delta core.Delta, heads []cid.Cid) (ipld.Node, error) { + var data []byte + var err error + if delta != nil { + data, err = delta.Marshal() + if err != nil { + return nil, err + } + } + + nd := dag.NodeWithData(data) + // The cid builder defaults to v0, we want to be using v1 Cids + err = nd.SetCidBuilder(cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: -1, + }) + if err != nil { + return nil, err + } + + // add heads + for _, h := range heads { + if err = nd.AddRawLink("_head", &ipld.Link{Cid: h}); err != nil { + return nil, err + } + } + + // add delta specific links + if comp, ok := delta.(core.CompositeDelta); ok { + for _, dagLink := range comp.Links() { + if err = nd.AddRawLink(dagLink.Name, &ipld.Link{Cid: dagLink.Cid}); err != nil { + return nil, err + } + } + } + return nd, nil +} diff --git a/net/dialer.go b/net/dialer.go index be1140a2b5..16ac03d03f 100644 --- a/net/dialer.go +++ b/net/dialer.go @@ -20,9 +20,7 @@ import ( gostream "github.com/libp2p/go-libp2p-gostream" libpeer "github.com/libp2p/go-libp2p/core/peer" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/status" corenet "github.com/sourcenetwork/defradb/core/net" "github.com/sourcenetwork/defradb/errors" @@ -41,8 +39,7 @@ func (s *server) dial(peerID libpeer.ID) (pb.ServiceClient, error) { conn, ok := s.conns[peerID] if ok { if conn.GetState() == connectivity.Shutdown { - if err := conn.Close(); err != nil && status.Code(err) != codes.Canceled { - // log.Errorf("error closing connection: %v", err) + if err := conn.Close(); err != nil { return nil, err } } else { diff --git a/net/dialer_test.go b/net/dialer_test.go new file mode 100644 index 0000000000..5e01b2384f --- /dev/null +++ b/net/dialer_test.go @@ -0,0 +1,114 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
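+//
+// These tests exercise server.dial: a gRPC connection is cached per peer ID and
+// reused on subsequent calls; if the cached connection has shut down it is
+// closed first, and any error from closing it is returned to the caller.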
+ +package net + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +func TestDial_WithConnectedPeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) +} + +func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) + + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) +} + +func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_ClosingConnectionError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + ctx := context.Background() + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + assert.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + _, err = n1.server.dial(n2.PeerID()) + require.NoError(t, err) + + err = n1.server.conns[n2.PeerID()].Close() + require.NoError(t, err) + + _, err = n1.server.dial(n2.PeerID()) + require.Contains(t, err.Error(), "grpc: the client connection is closing") +} diff --git a/net/doc.go b/net/doc.go index ac33d9a2d1..dd80ee53a4 100644 --- a/net/doc.go +++ b/net/doc.go @@ -11,7 +11,7 @@ // limitations under the License. /* -Package net provides p2p network functions for the core DefraDB instance. +Package net provides P2P network functions for the core DefraDB instance. Notable design descision: all DocKeys (Documents) have their own respective PubSub topics. 
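Aside: the dialer tests above all drive the same reuse-or-redial behaviour. Below is a minimal, self-contained Go sketch of that pattern; the names (cachingDialer, conn) are illustrative only and are not defradb types. The real net/dialer.go works with libp2p peer IDs and gRPC client connections and checks connectivity.Shutdown rather than a boolean flag.

package main

import (
	"fmt"
	"sync"
)

// conn is a toy stand-in for a client connection that can report whether it
// is still usable.
type conn struct{ closed bool }

func (c *conn) shutdown() bool { return c.closed }
func (c *conn) close() error   { c.closed = true; return nil }

// cachingDialer keeps at most one connection per peer and reuses it while it
// is healthy.
type cachingDialer struct {
	mu    sync.Mutex
	conns map[string]*conn
}

func (d *cachingDialer) dial(peerID string) (*conn, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if c, ok := d.conns[peerID]; ok {
		if !c.shutdown() {
			// Healthy cached connection: reuse it (the second dial in the tests).
			return c, nil
		}
		// Stale cached connection: close it and surface any close error,
		// otherwise fall through and dial a fresh one.
		if err := c.close(); err != nil {
			return nil, err
		}
	}
	c := &conn{}
	d.conns[peerID] = c
	return c, nil
}

func main() {
	d := &cachingDialer{conns: map[string]*conn{}}
	first, _ := d.dial("peer-a")
	second, _ := d.dial("peer-a")
	fmt.Println(first == second) // true: the cached connection was reused
}

The behavioural change in the net/dialer.go hunk above is that a close error is now always returned (previously errors with gRPC code Canceled were swallowed), which is what TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_ClosingConnectionError asserts.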
diff --git a/net/errors.go b/net/errors.go new file mode 100644 index 0000000000..3f8d4926c5 --- /dev/null +++ b/net/errors.go @@ -0,0 +1,49 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package net + +import ( + "fmt" + + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errPushLog = "failed to push log" + errFailedToGetDockey = "failed to get DocKey from broadcast message" + errPublishingToDockeyTopic = "can't publish log %s for dockey %s" + errPublishingToSchemaTopic = "can't publish log %s for schema %s" +) + +var ( + ErrPeerConnectionWaitTimout = errors.New("waiting for peer connection timed out") + ErrPubSubWaitTimeout = errors.New("waiting for pubsub timed out") + ErrPushLogWaitTimeout = errors.New("waiting for pushlog timed out") + ErrNilDB = errors.New("database object can't be nil") + ErrNilUpdateChannel = errors.New("tried to subscribe to update channel, but update channel is nil") + ErrSelfTargetForReplicator = errors.New("can't target ourselves as a replicator") +) + +func NewErrPushLog(inner error, kv ...errors.KV) error { + return errors.Wrap(errPushLog, inner, kv...) +} + +func NewErrFailedToGetDockey(inner error, kv ...errors.KV) error { + return errors.Wrap(errFailedToGetDockey, inner, kv...) +} + +func NewErrPublishingToDockeyTopic(inner error, cid, key string, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errPublishingToDockeyTopic, cid, key), inner, kv...) +} + +func NewErrPublishingToSchemaTopic(inner error, cid, key string, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, key), inner, kv...) +} diff --git a/node/node.go b/net/node.go similarity index 90% rename from node/node.go rename to net/node.go index 1df5da70e4..8f916cda16 100644 --- a/node/node.go +++ b/net/node.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -14,7 +14,7 @@ and GRPC server. Basically it combines db/DB, net/Peer, and net/Server into a single Node object. */ -package node +package net import ( "context" @@ -44,36 +44,26 @@ import ( "github.com/textileio/go-libp2p-pubsub-rpc/finalizer" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" ) -var ( - log = logging.MustNewLogger("node") -) - -const evtWaitTimeout = 10 * time.Second +var evtWaitTimeout = 10 * time.Second // Node is a networked peer instance of DefraDB. type Node struct { // embed the DB interface into the node client.DB - *net.Peer - - host host.Host - dht routing.Routing - pubsub *pubsub.PubSub + *Peer // receives an event when the status of a peer connection changes. peerEvent chan event.EvtPeerConnectednessChanged // receives an event when a pubsub topic is added. - pubSubEvent chan net.EvtPubSub + pubSubEvent chan EvtPubSub // receives an event when a pushLog request has been processed. 
- pushLogEvent chan net.EvtReceivedPushLog + pushLogEvent chan EvtReceivedPushLog ctx context.Context cancel context.CancelFunc @@ -95,7 +85,7 @@ func NewNode( // create our peerstore from the underlying defra rootstore // prefixed with "p2p" rootstore := db.Root() - pstore := namespace.Wrap(rootstore, ds.NewKey("peers")) + pstore := namespace.Wrap(rootstore, ds.NewKey("/db")) peerstore, err := pstoreds.NewPeerstore(ctx, pstore, pstoreds.DefaultOpts()) if err != nil { return nil, fin.Cleanup(err) @@ -158,7 +148,7 @@ func NewNode( ctx, cancel := context.WithCancel(ctx) - peer, err := net.NewPeer( + peer, err := NewPeer( ctx, db, h, @@ -180,13 +170,10 @@ func NewNode( // test, but we should resolve this when we can (e.g. via using subscribe-like // mechanics, potentially via use of a ring-buffer based [events.Channel] // implementation): https://github.com/sourcenetwork/defradb/issues/1358. - pubSubEvent: make(chan net.EvtPubSub, 20), - pushLogEvent: make(chan net.EvtReceivedPushLog, 20), + pubSubEvent: make(chan EvtPubSub, 20), + pushLogEvent: make(chan EvtReceivedPushLog, 20), peerEvent: make(chan event.EvtPeerConnectednessChanged, 20), Peer: peer, - host: h, - dht: ddht, - pubsub: ps, DB: db, ctx: ctx, cancel: cancel, @@ -249,6 +236,7 @@ func (n *Node) subscribeToPeerConnectionEvents() { n.ctx, fmt.Sprintf("failed to subscribe to peer connectedness changed event: %v", err), ) + return } go func() { for e := range sub.Out() { @@ -264,20 +252,21 @@ func (n *Node) subscribeToPeerConnectionEvents() { // subscribeToPubSubEvents subscribes the node to the event bus for a pubsub. func (n *Node) subscribeToPubSubEvents() { - sub, err := n.host.EventBus().Subscribe(new(net.EvtPubSub)) + sub, err := n.host.EventBus().Subscribe(new(EvtPubSub)) if err != nil { log.Info( n.ctx, fmt.Sprintf("failed to subscribe to pubsub event: %v", err), ) + return } go func() { for e := range sub.Out() { select { - case n.pubSubEvent <- e.(net.EvtPubSub): + case n.pubSubEvent <- e.(EvtPubSub): default: <-n.pubSubEvent - n.pubSubEvent <- e.(net.EvtPubSub) + n.pubSubEvent <- e.(EvtPubSub) } } }() @@ -285,20 +274,21 @@ func (n *Node) subscribeToPubSubEvents() { // subscribeToPushLogEvents subscribes the node to the event bus for a push log request completion. 
func (n *Node) subscribeToPushLogEvents() { - sub, err := n.host.EventBus().Subscribe(new(net.EvtReceivedPushLog)) + sub, err := n.host.EventBus().Subscribe(new(EvtReceivedPushLog)) if err != nil { log.Info( n.ctx, fmt.Sprintf("failed to subscribe to push log event: %v", err), ) + return } go func() { for e := range sub.Out() { select { - case n.pushLogEvent <- e.(net.EvtReceivedPushLog): + case n.pushLogEvent <- e.(EvtReceivedPushLog): default: <-n.pushLogEvent - n.pushLogEvent <- e.(net.EvtReceivedPushLog) + n.pushLogEvent <- e.(EvtReceivedPushLog) } } }() @@ -317,7 +307,7 @@ func (n *Node) WaitForPeerConnectionEvent(id peer.ID) error { } return nil case <-time.After(evtWaitTimeout): - return errors.New("waiting for peer connection timed out") + return ErrPeerConnectionWaitTimout case <-n.ctx.Done(): return nil } @@ -334,7 +324,7 @@ func (n *Node) WaitForPubSubEvent(id peer.ID) error { } return nil case <-time.After(evtWaitTimeout): - return errors.New("waiting for pubsub timed out") + return ErrPubSubWaitTimeout case <-n.ctx.Done(): return nil } @@ -357,7 +347,7 @@ func (n *Node) WaitForPushLogByPeerEvent(id peer.ID) error { } return nil case <-time.After(evtWaitTimeout): - return errors.New("waiting for pushlog timed out") + return ErrPushLogWaitTimeout case <-n.ctx.Done(): return nil } @@ -380,7 +370,7 @@ func (n *Node) WaitForPushLogFromPeerEvent(id peer.ID) error { } return nil case <-time.After(evtWaitTimeout): - return errors.New("waiting for pushlog timed out") + return ErrPushLogWaitTimeout case <-n.ctx.Done(): return nil } diff --git a/net/node_test.go b/net/node_test.go new file mode 100644 index 0000000000..b2ab255b20 --- /dev/null +++ b/net/node_test.go @@ -0,0 +1,799 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package net + +import ( + "bytes" + "context" + "testing" + "time" + + badger "github.com/dgraph-io/badger/v3" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/logging" + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +// Node.Boostrap is not tested because the underlying, *ipfslite.Peer.Bootstrap is a best-effort function. + +func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { + var database client.DB + var options []db.Option + ctx := context.Background() + options = append(options, db.WithUpdateEvents()) + opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + rootstore, err := badgerds.NewDatastore("", &opts) + require.NoError(t, err) + database, err = db.NewDB(ctx, rootstore, options...) 
+ require.NoError(t, err) + return database +} + +func TestNewNode_WithEnableRelay_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + _, err = NewNode( + context.Background(), + db, + WithEnableRelay(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) +} + +func TestNewNode_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + _, err = NewNode( + context.Background(), + db, + WithListenTCPAddrString("/ip4/碎片整理"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") +} + +func TestNewNode_WithDBClosed_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + db.Close(ctx) + _, err = NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestNewNode_NoPubSub_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + WithPubSub(false), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + require.Nil(t, n.ps) +} + +func TestNewNode_WithPubSub_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + + require.NoError(t, err) + // overly simple check of validity of pubsub, avoiding the process of creating a PubSub + require.NotNil(t, n.ps) +} + +func TestNewNode_WithPubSub_FailsWithoutWithDataPath(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = NewNode( + ctx, + db, + WithPubSub(true), + ) + require.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") +} + +func TestNodeClose_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + err = n.Close() + require.NoError(t, err) +} + +func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := 
NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n1.Boostrap([]peer.AddrInfo{}) +} + +func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) +} + +func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + addrs, err := netutils.ParsePeers([]string{ + n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String(), + "/ip4/0.0.0.0/tcp/1234/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci6", + "/ip4/0.0.0.0/tcp/1235/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci5", + "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", + }) + require.NoError(t, err) + n2.Boostrap(addrs) +} + +func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { + var options Options + var nodeOpt NodeOpt + for _, opt := range append(nodeOpts, nodeOpt) { + if opt == nil { + continue + } + if err := opt(&options); err != nil { + return options, err + } + } + return options, nil +} + +func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + n, err := NewNode( + context.Background(), + db, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") +} + +func TestWithListenTCPAddrString_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { + opt := WithListenTCPAddrString("/ip4/碎片整理") + options, err := mergeOptions(opt) + require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") + require.Equal(t, Options{}, options) +} + +func TestNodeConfig_NoError(t *testing.T) { + tempDir := t.TempDir() + + cfg := config.DefaultConfig() + 
cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" + cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" + cfg.Net.RPCTimeout = "100s" + cfg.Net.RPCMaxConnectionIdle = "111s" + cfg.Net.RelayEnabled = true + cfg.Net.PubSubEnabled = true + cfg.Datastore.Badger.Path = tempDir + + configOpt := WithConfig(cfg) + options, err := NewMergedOptions(configOpt) + require.NoError(t, err) + + // confirming it provides the same config as a manually constructed node.Options + p2pAddr, err := ma.NewMultiaddr(cfg.Net.P2PAddress) + require.NoError(t, err) + tcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress) + require.NoError(t, err) + connManager, err := NewConnManager(100, 400, time.Second*20) + require.NoError(t, err) + expectedOptions := Options{ + ListenAddrs: []ma.Multiaddr{p2pAddr}, + TCPAddr: tcpAddr, + DataPath: tempDir, + EnablePubSub: true, + EnableRelay: true, + ConnManager: connManager, + } + + for k, v := range options.ListenAddrs { + require.Equal(t, expectedOptions.ListenAddrs[k], v) + } + require.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) + require.Equal(t, expectedOptions.DataPath, options.DataPath) + require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) + require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) +} + +func TestSubscribeToPeerConnectionEvents_SubscriptionError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + n.Peer.host = &mockHost{n.Peer.host} + + n.subscribeToPeerConnectionEvents() + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) + } + require.Equal(t, "failed to subscribe to peer connectedness changed event: mock error", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func TestPeerConnectionEventEmitter_SingleEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) +} + +func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. 
+ for i := 0; i < 21; i++ { + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) + } +} + +func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + n.Peer.host = &mockHost{n.Peer.host} + + n.subscribeToPubSubEvents() + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) + } + require.Equal(t, "failed to subscribe to pubsub event: mock error", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func TestPubSubEventEmitter_SingleEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) +} + +func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. 
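+	// The subscription goroutine in subscribeToPubSubEvents never blocks on a
+	// full buffer: it discards the oldest queued event before queueing the new
+	// one (see the select/default there).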
+ for i := 0; i < 21; i++ { + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) + } +} + +func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + n.Peer.host = &mockHost{n.Peer.host} + + n.subscribeToPushLogEvents() + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) + } + require.Equal(t, "failed to subscribe to push log event: mock error", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) +} + +func TestPushLogEventEmitter_MultiEvent_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + // the emitter can take 20 events in the channel. This tests what happens whe go over the 20 events. 
+ for i := 0; i < 21; i++ { + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + } +} + +func TestWaitForPeerConnectionEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{ + Peer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPeerConnectionEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPeerConnectionWaitTimout) +} + +func TestWaitForPeerConnectionEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) + require.NoError(t, err) + + err = emitter.Emit(event.EvtPeerConnectednessChanged{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPeerConnectionEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPubSubEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{ + Peer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPubSubEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPubSubEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) + + err = n.WaitForPubSubEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPubSubWaitTimeout) +} + +func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required 
option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) + require.NoError(t, err) + + err = emitter.Emit(EvtPubSub{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPubSubEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{ + ByPeer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPushLogWaitTimeout) +} + +func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPushLogByPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{ + FromPeer: n.PeerID(), + }) + require.NoError(t, err) + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.NoError(t, err) +} + +func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) { + evtWaitTimeout = 100 * time.Microsecond + defer func() { + evtWaitTimeout = 10 * time.Second + }() + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.ErrorIs(t, err, ErrPushLogWaitTimeout) 
+} + +func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + db := FixtureNewMemoryDBWithBroadcaster(t) + n, err := NewNode( + context.Background(), + db, + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + emitter, err := n.host.EventBus().Emitter(new(EvtReceivedPushLog)) + require.NoError(t, err) + + err = emitter.Emit(EvtReceivedPushLog{}) + require.NoError(t, err) + + n.cancel() + + err = n.WaitForPushLogFromPeerEvent(n.PeerID()) + require.NoError(t, err) +} diff --git a/net/pb/Makefile b/net/pb/Makefile index e96e192c5a..62eef77354 100644 --- a/net/pb/Makefile +++ b/net/pb/Makefile @@ -4,9 +4,12 @@ GO = $(PB:.proto=.pb.go) all: $(GO) %.pb.go: %.proto - protoc -I=. -I=$(GOPATH)/src -I=$(GOPATH)/src/github.com/gogo/protobuf/protobuf --gogofaster_out=\ - plugins=grpc:\ - . $< + protoc \ + --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ + --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ + --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ + --go-vtproto_opt=features=marshal+unmarshal+size \ + $< clean: rm -f *.pb.go diff --git a/net/pb/custom.go b/net/pb/custom.go deleted file mode 100644 index c71f585fd4..0000000000 --- a/net/pb/custom.go +++ /dev/null @@ -1,196 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package net_pb - -import ( - "encoding/json" - - "github.com/gogo/protobuf/proto" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-varint" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" -) - -// customGogoType aggregates the interfaces that custom Gogo types need to implement. -// it is only used for type assertions. -type customGogoType interface { - proto.Marshaler - proto.Unmarshaler - json.Marshaler - json.Unmarshaler - proto.Sizer - MarshalTo(data []byte) (n int, err error) -} - -// LibP2P object custom protobuf types - -// ProtoPeerID is a custom type used by gogo to serde raw peer IDs into the peer.ID type, and back. 
-type ProtoPeerID struct { - peer.ID -} - -var _ customGogoType = (*ProtoPeerID)(nil) - -func (id ProtoPeerID) Marshal() ([]byte, error) { - return []byte(id.ID), nil -} - -func (id ProtoPeerID) MarshalTo(data []byte) (n int, err error) { - return copy(data, id.ID), nil -} - -func (id ProtoPeerID) MarshalJSON() ([]byte, error) { - m, _ := id.Marshal() - return json.Marshal(m) -} - -func (id *ProtoPeerID) Unmarshal(data []byte) (err error) { - id.ID = peer.ID(string(data)) - return nil -} - -func (id *ProtoPeerID) UnmarshalJSON(data []byte) error { - var v []byte - err := json.Unmarshal(data, &v) - if err != nil { - return err - } - return id.Unmarshal(v) -} - -func (id ProtoPeerID) Size() int { - return len([]byte(id.ID)) -} - -// ProtoAddr is a custom type used by gogo to serde raw multiaddresses into -// the ma.Multiaddr type, and back. -type ProtoAddr struct { - ma.Multiaddr -} - -var _ customGogoType = (*ProtoAddr)(nil) - -func (a ProtoAddr) Marshal() ([]byte, error) { - return a.Bytes(), nil -} - -func (a ProtoAddr) MarshalTo(data []byte) (n int, err error) { - return copy(data, a.Bytes()), nil -} - -func (a ProtoAddr) MarshalJSON() ([]byte, error) { - m, _ := a.Marshal() - return json.Marshal(m) -} - -func (a *ProtoAddr) Unmarshal(data []byte) (err error) { - a.Multiaddr, err = ma.NewMultiaddrBytes(data) - return err -} - -func (a *ProtoAddr) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return a.Unmarshal(*v) -} - -func (a ProtoAddr) Size() int { - return len(a.Bytes()) -} - -// ProtoCid is a custom type used by gogo to serde raw CIDs into the cid.CID type, and back. -type ProtoCid struct { - cid.Cid -} - -var _ customGogoType = (*ProtoCid)(nil) - -func (c ProtoCid) Marshal() ([]byte, error) { - return c.Bytes(), nil -} - -func (c ProtoCid) MarshalTo(data []byte) (n int, err error) { - return copy(data, c.Bytes()), nil -} - -func (c ProtoCid) MarshalJSON() ([]byte, error) { - m, _ := c.Marshal() - return json.Marshal(m) -} - -func (c *ProtoCid) Unmarshal(data []byte) (err error) { - c.Cid, err = cid.Cast(data) - if errors.Is(err, varint.ErrUnderflow) { - c.Cid = cid.Undef - return nil - } - return err -} - -func (c *ProtoCid) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return c.Unmarshal(*v) -} - -func (c ProtoCid) Size() int { - return len(c.Bytes()) -} - -// ProtoCid is a custom type used by gogo to serde raw CIDs into the cid.CID type, and back. -type ProtoDocKey struct { - client.DocKey -} - -var _ customGogoType = (*ProtoDocKey)(nil) - -func (c ProtoDocKey) Marshal() ([]byte, error) { - return []byte(c.String()), nil -} - -func (c ProtoDocKey) MarshalTo(data []byte) (n int, err error) { - return copy(data, []byte(c.String())), nil -} - -func (c ProtoDocKey) MarshalJSON() ([]byte, error) { - m, _ := c.Marshal() - return json.Marshal(m) -} - -func (c *ProtoDocKey) Unmarshal(data []byte) (err error) { - c.DocKey, err = client.NewDocKeyFromString(string(data)) - return err -} - -func (c *ProtoDocKey) UnmarshalJSON(data []byte) error { - v := new([]byte) - err := json.Unmarshal(data, v) - if err != nil { - return err - } - return c.Unmarshal(*v) -} - -func (c ProtoDocKey) Size() int { - return len([]byte(c.String())) -} diff --git a/net/pb/net.pb.go b/net/pb/net.pb.go index 14ba3650b5..70daae73a7 100644 --- a/net/pb/net.pb.go +++ b/net/pb/net.pb.go @@ -1,2445 +1,1922 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.9 // source: net.proto package net_pb import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Log represents a thread log. type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID of the document. - DocKey *ProtoDocKey `protobuf:"bytes,1,opt,name=docKey,proto3,customtype=ProtoDocKey" json:"docKey,omitempty"` + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` // head of the log. - Head *ProtoCid `protobuf:"bytes,4,opt,name=head,proto3,customtype=ProtoCid" json:"head,omitempty"` + Head []byte `protobuf:"bytes,4,opt,name=head,proto3" json:"head,omitempty"` } -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{0} -} -func (m *Document) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Document.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Document) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document.Merge(m, src) -} -func (m *Document) XXX_Size() int { - return m.Size() -} -func (m *Document) XXX_DiscardUnknown() { - xxx_messageInfo_Document.DiscardUnknown(m) -} -var xxx_messageInfo_Document proto.InternalMessageInfo - -// Record is a thread record containing link data. -type Document_Log struct { - // block is the top-level node's raw data as an ipld.Block. 
- Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document_Log) Reset() { *m = Document_Log{} } -func (m *Document_Log) String() string { return proto.CompactTextString(m) } -func (*Document_Log) ProtoMessage() {} -func (*Document_Log) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{0, 0} -} -func (m *Document_Log) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Document_Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Document_Log.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *Document_Log) XXX_Merge(src proto.Message) { - xxx_messageInfo_Document_Log.Merge(m, src) -} -func (m *Document_Log) XXX_Size() int { - return m.Size() -} -func (m *Document_Log) XXX_DiscardUnknown() { - xxx_messageInfo_Document_Log.DiscardUnknown(m) + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. +func (*Document) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Document_Log proto.InternalMessageInfo +func (x *Document) GetDocKey() []byte { + if x != nil { + return x.DocKey + } + return nil +} -func (m *Document_Log) GetBlock() []byte { - if m != nil { - return m.Block +func (x *Document) GetHead() []byte { + if x != nil { + return x.Head } return nil } type GetDocGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphRequest) Reset() { *m = GetDocGraphRequest{} } -func (m *GetDocGraphRequest) String() string { return proto.CompactTextString(m) } -func (*GetDocGraphRequest) ProtoMessage() {} -func (*GetDocGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{1} -} -func (m *GetDocGraphRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetDocGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetDocGraphRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetDocGraphRequest) Reset() { + *x = GetDocGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *GetDocGraphRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocGraphRequest.Merge(m, src) -} -func (m *GetDocGraphRequest) XXX_Size() int { - return m.Size() -} -func (m *GetDocGraphRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocGraphRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDocGraphRequest proto.InternalMessageInfo -type GetDocGraphReply struct { +func (x *GetDocGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetDocGraphReply) Reset() { *m = GetDocGraphReply{} } -func (m *GetDocGraphReply) String() 
string { return proto.CompactTextString(m) } -func (*GetDocGraphReply) ProtoMessage() {} -func (*GetDocGraphReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{2} -} -func (m *GetDocGraphReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetDocGraphReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetDocGraphReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetDocGraphRequest) ProtoMessage() {} + +func (x *GetDocGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetDocGraphReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDocGraphReply.Merge(m, src) -} -func (m *GetDocGraphReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetDocGraphRequest.ProtoReflect.Descriptor instead. +func (*GetDocGraphRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{1} } -func (m *GetDocGraphReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetDocGraphReply.DiscardUnknown(m) + +type GetDocGraphReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetDocGraphReply proto.InternalMessageInfo +func (x *GetDocGraphReply) Reset() { + *x = GetDocGraphReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushDocGraphRequest struct { +func (x *GetDocGraphReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphRequest) Reset() { *m = PushDocGraphRequest{} } -func (m *PushDocGraphRequest) String() string { return proto.CompactTextString(m) } -func (*PushDocGraphRequest) ProtoMessage() {} -func (*PushDocGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{3} -} -func (m *PushDocGraphRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushDocGraphRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushDocGraphRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetDocGraphReply) ProtoMessage() {} + +func (x *GetDocGraphReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushDocGraphRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushDocGraphRequest.Merge(m, src) -} -func (m *PushDocGraphRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetDocGraphReply.ProtoReflect.Descriptor instead. 
+func (*GetDocGraphReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{2} } -func (m *PushDocGraphRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PushDocGraphRequest.DiscardUnknown(m) + +type PushDocGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_PushDocGraphRequest proto.InternalMessageInfo +func (x *PushDocGraphRequest) Reset() { + *x = PushDocGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushDocGraphReply struct { +func (x *PushDocGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphReply) Reset() { *m = PushDocGraphReply{} } -func (m *PushDocGraphReply) String() string { return proto.CompactTextString(m) } -func (*PushDocGraphReply) ProtoMessage() {} -func (*PushDocGraphReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{4} -} -func (m *PushDocGraphReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushDocGraphReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushDocGraphReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushDocGraphRequest) ProtoMessage() {} + +func (x *PushDocGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushDocGraphReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushDocGraphReply.Merge(m, src) -} -func (m *PushDocGraphReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use PushDocGraphRequest.ProtoReflect.Descriptor instead. 
+func (*PushDocGraphRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{3} } -func (m *PushDocGraphReply) XXX_DiscardUnknown() { - xxx_messageInfo_PushDocGraphReply.DiscardUnknown(m) + +type PushDocGraphReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_PushDocGraphReply proto.InternalMessageInfo +func (x *PushDocGraphReply) Reset() { + *x = PushDocGraphReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetLogRequest struct { +func (x *PushDocGraphReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogRequest) Reset() { *m = GetLogRequest{} } -func (m *GetLogRequest) String() string { return proto.CompactTextString(m) } -func (*GetLogRequest) ProtoMessage() {} -func (*GetLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{5} -} -func (m *GetLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushDocGraphReply) ProtoMessage() {} + +func (x *PushDocGraphReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLogRequest.Merge(m, src) -} -func (m *GetLogRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use PushDocGraphReply.ProtoReflect.Descriptor instead. 
+func (*PushDocGraphReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{4} } -func (m *GetLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetLogRequest.DiscardUnknown(m) + +type GetLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetLogRequest proto.InternalMessageInfo +func (x *GetLogRequest) Reset() { + *x = GetLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetLogReply struct { +func (x *GetLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogReply) Reset() { *m = GetLogReply{} } -func (m *GetLogReply) String() string { return proto.CompactTextString(m) } -func (*GetLogReply) ProtoMessage() {} -func (*GetLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{6} -} -func (m *GetLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetLogRequest) ProtoMessage() {} + +func (x *GetLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLogReply.Merge(m, src) -} -func (m *GetLogReply) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetLogRequest.ProtoReflect.Descriptor instead. 
+func (*GetLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{5} } -func (m *GetLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetLogReply.DiscardUnknown(m) + +type GetLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetLogReply proto.InternalMessageInfo +func (x *GetLogReply) Reset() { + *x = GetLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushLogRequest struct { - Body *PushLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +func (x *GetLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest) Reset() { *m = PushLogRequest{} } -func (m *PushLogRequest) String() string { return proto.CompactTextString(m) } -func (*PushLogRequest) ProtoMessage() {} -func (*PushLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{7} -} -func (m *PushLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetLogReply) ProtoMessage() {} + +func (x *GetLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *PushLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogRequest.Merge(m, src) -} -func (m *PushLogRequest) XXX_Size() int { - return m.Size() -} -func (m *PushLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogRequest.DiscardUnknown(m) + +// Deprecated: Use GetLogReply.ProtoReflect.Descriptor instead. +func (*GetLogReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{6} } -var xxx_messageInfo_PushLogRequest proto.InternalMessageInfo +type PushLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Body *PushLogRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +} -func (m *PushLogRequest) GetBody() *PushLogRequest_Body { - if m != nil { - return m.Body +func (x *PushLogRequest) Reset() { + *x = PushLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type PushLogRequest_Body struct { - // docKey is the target DocKey. - DocKey *ProtoDocKey `protobuf:"bytes,1,opt,name=docKey,proto3,customtype=ProtoDocKey" json:"docKey,omitempty"` - // cid is the target CID. - Cid *ProtoCid `protobuf:"bytes,2,opt,name=cid,proto3,customtype=ProtoCid" json:"cid,omitempty"` - // - SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` - // from is the peer ID of the peer that created the log - Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` - // record is the actual record payload. 
- Log *Document_Log `protobuf:"bytes,5,opt,name=log,proto3" json:"log,omitempty"` +func (x *PushLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest_Body) Reset() { *m = PushLogRequest_Body{} } -func (m *PushLogRequest_Body) String() string { return proto.CompactTextString(m) } -func (*PushLogRequest_Body) ProtoMessage() {} -func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{7, 0} -} -func (m *PushLogRequest_Body) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogRequest_Body) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogRequest_Body.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushLogRequest) ProtoMessage() {} + +func (x *PushLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *PushLogRequest_Body) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogRequest_Body.Merge(m, src) -} -func (m *PushLogRequest_Body) XXX_Size() int { - return m.Size() -} -func (m *PushLogRequest_Body) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogRequest_Body.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_PushLogRequest_Body proto.InternalMessageInfo +// Deprecated: Use PushLogRequest.ProtoReflect.Descriptor instead. +func (*PushLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7} +} -func (m *PushLogRequest_Body) GetSchemaID() []byte { - if m != nil { - return m.SchemaID +func (x *PushLogRequest) GetBody() *PushLogRequest_Body { + if x != nil { + return x.Body } return nil } -func (m *PushLogRequest_Body) GetCreator() string { - if m != nil { - return m.Creator - } - return "" +type GetHeadLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *PushLogRequest_Body) GetLog() *Document_Log { - if m != nil { - return m.Log +func (x *GetHeadLogRequest) Reset() { + *x = GetHeadLogRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetHeadLogRequest struct { +func (x *GetHeadLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) Reset() { *m = GetHeadLogRequest{} } -func (m *GetHeadLogRequest) String() string { return proto.CompactTextString(m) } -func (*GetHeadLogRequest) ProtoMessage() {} -func (*GetHeadLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{8} -} -func (m *GetHeadLogRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetHeadLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetHeadLogRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetHeadLogRequest) ProtoMessage() {} + +func (x *GetHeadLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetHeadLogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHeadLogRequest.Merge(m, src) -} -func (m *GetHeadLogRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetHeadLogRequest.ProtoReflect.Descriptor instead. +func (*GetHeadLogRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{8} } -func (m *GetHeadLogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHeadLogRequest.DiscardUnknown(m) + +type PushLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_GetHeadLogRequest proto.InternalMessageInfo +func (x *PushLogReply) Reset() { + *x = PushLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type PushLogReply struct { +func (x *PushLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogReply) Reset() { *m = PushLogReply{} } -func (m *PushLogReply) String() string { return proto.CompactTextString(m) } -func (*PushLogReply) ProtoMessage() {} -func (*PushLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{9} -} -func (m *PushLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PushLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PushLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PushLogReply) ProtoMessage() {} + +func (x *PushLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use PushLogReply.ProtoReflect.Descriptor instead. 
+func (*PushLogReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{9} } -func (m *PushLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PushLogReply.Merge(m, src) + +type GetHeadLogReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *PushLogReply) XXX_Size() int { - return m.Size() + +func (x *GetHeadLogReply) Reset() { + *x = GetHeadLogReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PushLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_PushLogReply.DiscardUnknown(m) + +func (x *GetHeadLogReply) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PushLogReply proto.InternalMessageInfo +func (*GetHeadLogReply) ProtoMessage() {} -type GetHeadLogReply struct { +func (x *GetHeadLogReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetHeadLogReply) Reset() { *m = GetHeadLogReply{} } -func (m *GetHeadLogReply) String() string { return proto.CompactTextString(m) } -func (*GetHeadLogReply) ProtoMessage() {} +// Deprecated: Use GetHeadLogReply.ProtoReflect.Descriptor instead. func (*GetHeadLogReply) Descriptor() ([]byte, []int) { - return fileDescriptor_a5b10ce944527a32, []int{10} -} -func (m *GetHeadLogReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetHeadLogReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetHeadLogReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetHeadLogReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHeadLogReply.Merge(m, src) -} -func (m *GetHeadLogReply) XXX_Size() int { - return m.Size() -} -func (m *GetHeadLogReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetHeadLogReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHeadLogReply proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Document)(nil), "net.pb.Document") - proto.RegisterType((*Document_Log)(nil), "net.pb.Document.Log") - proto.RegisterType((*GetDocGraphRequest)(nil), "net.pb.GetDocGraphRequest") - proto.RegisterType((*GetDocGraphReply)(nil), "net.pb.GetDocGraphReply") - proto.RegisterType((*PushDocGraphRequest)(nil), "net.pb.PushDocGraphRequest") - proto.RegisterType((*PushDocGraphReply)(nil), "net.pb.PushDocGraphReply") - proto.RegisterType((*GetLogRequest)(nil), "net.pb.GetLogRequest") - proto.RegisterType((*GetLogReply)(nil), "net.pb.GetLogReply") - proto.RegisterType((*PushLogRequest)(nil), "net.pb.PushLogRequest") - proto.RegisterType((*PushLogRequest_Body)(nil), "net.pb.PushLogRequest.Body") - proto.RegisterType((*GetHeadLogRequest)(nil), "net.pb.GetHeadLogRequest") - proto.RegisterType((*PushLogReply)(nil), "net.pb.PushLogReply") - proto.RegisterType((*GetHeadLogReply)(nil), "net.pb.GetHeadLogReply") -} - -func init() { proto.RegisterFile("net.proto", fileDescriptor_a5b10ce944527a32) } - -var fileDescriptor_a5b10ce944527a32 = []byte{ - // 480 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 
0x93, 0xc1, 0x6e, 0xda, 0x40, - 0x10, 0x86, 0x71, 0x20, 0x40, 0x06, 0x12, 0x9a, 0x81, 0xb4, 0xce, 0x46, 0x72, 0x22, 0x0e, 0x6d, - 0x2f, 0x35, 0x52, 0x2a, 0x55, 0xea, 0x95, 0x52, 0x91, 0xaa, 0x39, 0x44, 0xee, 0x13, 0xd8, 0xeb, - 0xad, 0x8d, 0x0a, 0x59, 0x6a, 0xd6, 0x95, 0x78, 0x8b, 0xbe, 0x48, 0xdf, 0xa3, 0xc7, 0xf4, 0x56, - 0xe5, 0x10, 0x55, 0xf0, 0x04, 0x7d, 0x83, 0x6a, 0x67, 0x63, 0xc0, 0xe0, 0x43, 0x6f, 0x9e, 0xf9, - 0xff, 0x99, 0xd9, 0xf9, 0x46, 0x86, 0x83, 0x5b, 0xa1, 0xdc, 0x69, 0x22, 0x95, 0xc4, 0x2a, 0x7d, - 0x06, 0xec, 0x55, 0x34, 0x52, 0x71, 0x1a, 0xb8, 0x5c, 0x4e, 0x7a, 0x91, 0x8c, 0x64, 0x8f, 0xe4, - 0x20, 0xfd, 0x4c, 0x11, 0x05, 0xf4, 0x65, 0xca, 0xba, 0x09, 0xd4, 0x07, 0x92, 0xa7, 0x13, 0x71, - 0xab, 0xf0, 0x05, 0x54, 0x43, 0xc9, 0x3f, 0x8a, 0xb9, 0x6d, 0x5d, 0x58, 0x2f, 0x9b, 0xfd, 0xd6, - 0xfd, 0xc3, 0x79, 0xe3, 0x46, 0xdb, 0x06, 0x94, 0xf6, 0x1e, 0x65, 0xbc, 0x80, 0x4a, 0x2c, 0xfc, - 0xd0, 0xae, 0x90, 0xad, 0x79, 0xff, 0x70, 0x5e, 0x27, 0xdb, 0xbb, 0x51, 0xe8, 0x91, 0xc2, 0xce, - 0xa0, 0x7c, 0x2d, 0x23, 0xec, 0xc0, 0x7e, 0x30, 0x96, 0xfc, 0x8b, 0x69, 0xe8, 0x99, 0xa0, 0xdb, - 0x01, 0x1c, 0x0a, 0x35, 0x90, 0x7c, 0x98, 0xf8, 0xd3, 0xd8, 0x13, 0x5f, 0x53, 0x31, 0x53, 0x5d, - 0x84, 0x27, 0xb9, 0xec, 0x74, 0x3c, 0xef, 0x9e, 0x40, 0xfb, 0x26, 0x9d, 0xc5, 0xdb, 0xd6, 0x36, - 0x1c, 0xe7, 0xd3, 0xda, 0xdb, 0x82, 0xc3, 0xa1, 0x50, 0xd7, 0x32, 0xca, 0x5c, 0x87, 0xd0, 0xc8, - 0x12, 0x5a, 0xff, 0x6b, 0xc1, 0x91, 0xae, 0x5a, 0x3b, 0xb0, 0x07, 0x95, 0x40, 0x86, 0x66, 0xdd, - 0xc6, 0xe5, 0x99, 0x6b, 0x10, 0xba, 0x79, 0x97, 0xdb, 0x97, 0xe1, 0xdc, 0x23, 0x23, 0xfb, 0x61, - 0x41, 0x45, 0x87, 0xff, 0x8f, 0xca, 0x81, 0x32, 0x1f, 0x85, 0xf6, 0x5e, 0x01, 0x29, 0x2d, 0x20, - 0x83, 0xfa, 0x8c, 0xc7, 0x62, 0xe2, 0x7f, 0x18, 0xd8, 0x65, 0x82, 0xb4, 0x8a, 0xd1, 0x86, 0x1a, - 0x4f, 0x84, 0xaf, 0x64, 0x42, 0xa4, 0x0f, 0xbc, 0x2c, 0xc4, 0xe7, 0x50, 0x1e, 0xcb, 0xc8, 0xde, - 0xa7, 0x77, 0x77, 0xb2, 0x77, 0x67, 0x87, 0x74, 0xf5, 0xe3, 0xb5, 0x41, 0x83, 0x1a, 0x0a, 0x75, - 0x25, 0xfc, 0x70, 0x83, 0xcb, 0x11, 0x34, 0x57, 0x1b, 0x6a, 0x30, 0xc7, 0xd0, 0xda, 0x34, 0x4d, - 0xc7, 0xf3, 0xcb, 0x5f, 0x7b, 0x50, 0xfb, 0x24, 0x92, 0x6f, 0x23, 0x2e, 0xf0, 0x3d, 0x61, 0xcc, - 0x58, 0x23, 0xcb, 0xa6, 0xed, 0x9e, 0x90, 0xd9, 0x85, 0x9a, 0x9e, 0x51, 0xc2, 0x2b, 0x33, 0x75, - 0xd5, 0x27, 0x47, 0x7b, 0xbb, 0xd1, 0x69, 0xb1, 0x68, 0x3a, 0xbd, 0x81, 0xaa, 0xb9, 0x2b, 0x9e, - 0x6c, 0xcc, 0x5b, 0x2f, 0xc8, 0xda, 0xdb, 0x69, 0x53, 0xf7, 0x16, 0x6a, 0x8f, 0x7b, 0xe3, 0xd3, - 0xe2, 0x53, 0xb3, 0xce, 0x4e, 0xde, 0x94, 0xf6, 0x01, 0xd6, 0x88, 0xf0, 0x74, 0xa3, 0x7f, 0x9e, - 0x2d, 0x7b, 0x56, 0x24, 0x51, 0x8f, 0xbe, 0xfd, 0x73, 0xe1, 0x58, 0x77, 0x0b, 0xc7, 0xfa, 0xb3, - 0x70, 0xac, 0xef, 0x4b, 0xa7, 0x74, 0xb7, 0x74, 0x4a, 0xbf, 0x97, 0x4e, 0x29, 0xa8, 0xd2, 0xaf, - 0xf8, 0xfa, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x46, 0x27, 0x1c, 0xce, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn + return file_net_proto_rawDescGZIP(), []int{10} +} -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +type SetReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// ServiceClient is the client API for Service service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ServiceClient interface { - // GetDocGraph from this peer. - GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) - // PushDocGraph to this peer. - PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) - // GetLog from this peer. - GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) - // PushLog to this peer. - PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) - // GetHeadLog from this peer - GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` + Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` } -type serviceClient struct { - cc *grpc.ClientConn +func (x *SetReplicatorRequest) Reset() { + *x = SetReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewServiceClient(cc *grpc.ClientConn) ServiceClient { - return &serviceClient{cc} +func (x *SetReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *serviceClient) GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) { - out := new(GetDocGraphReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetDocGraph", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*SetReplicatorRequest) ProtoMessage() {} -func (c *serviceClient) PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) { - out := new(PushDocGraphReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/PushDocGraph", in, out, opts...) - if err != nil { - return nil, err +func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *serviceClient) GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) { - out := new(GetLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetLog", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. +func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{11} } -func (c *serviceClient) PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) { - out := new(PushLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/PushLog", in, out, opts...) 
- if err != nil { - return nil, err +func (x *SetReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return out, nil + return nil } -func (c *serviceClient) GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) { - out := new(GetHeadLogReply) - err := c.cc.Invoke(ctx, "/net.pb.Service/GetHeadLog", in, out, opts...) - if err != nil { - return nil, err +func (x *SetReplicatorRequest) GetAddr() []byte { + if x != nil { + return x.Addr } - return out, nil + return nil } -// ServiceServer is the server API for Service service. -type ServiceServer interface { - // GetDocGraph from this peer. - GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) - // PushDocGraph to this peer. - PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) - // GetLog from this peer. - GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) - // PushLog to this peer. - PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) - // GetHeadLog from this peer - GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) -} +type SetReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// UnimplementedServiceServer can be embedded to have forward compatible implementations. -type UnimplementedServiceServer struct { + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (*UnimplementedServiceServer) GetDocGraph(ctx context.Context, req *GetDocGraphRequest) (*GetDocGraphReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetDocGraph not implemented") +func (x *SetReplicatorReply) Reset() { + *x = SetReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedServiceServer) PushDocGraph(ctx context.Context, req *PushDocGraphRequest) (*PushDocGraphReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method PushDocGraph not implemented") + +func (x *SetReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedServiceServer) GetLog(ctx context.Context, req *GetLogRequest) (*GetLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLog not implemented") + +func (*SetReplicatorReply) ProtoMessage() {} + +func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedServiceServer) PushLog(ctx context.Context, req *PushLogRequest) (*PushLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method PushLog not implemented") + +// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*SetReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{12} } -func (*UnimplementedServiceServer) GetHeadLog(ctx context.Context, req *GetHeadLogRequest) (*GetHeadLogReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHeadLog not implemented") + +func (x *SetReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil } -func RegisterServiceServer(s *grpc.Server, srv ServiceServer) { - s.RegisterService(&_Service_serviceDesc, srv) +type DeleteReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` + Collections []string `protobuf:"bytes,2,rep,name=collections,proto3" json:"collections,omitempty"` } -func _Service_GetDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetDocGraphRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetDocGraph(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetDocGraph", +func (x *DeleteReplicatorRequest) Reset() { + *x = DeleteReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetDocGraph(ctx, req.(*GetDocGraphRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Service_PushDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PushDocGraphRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).PushDocGraph(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/PushDocGraph", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).PushDocGraph(ctx, req.(*PushDocGraphRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *DeleteReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Service_GetLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetLog", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetLog(ctx, req.(*GetLogRequest)) +func (*DeleteReplicatorRequest) ProtoMessage() {} + +func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Service_PushLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PushLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).PushLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/PushLog", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).PushLog(ctx, req.(*PushLogRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{13} } -func _Service_GetHeadLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHeadLogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetHeadLog(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/net.pb.Service/GetHeadLog", +func (x *DeleteReplicatorRequest) GetPeerID() []byte { + if x != nil { + return x.PeerID } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetHeadLog(ctx, req.(*GetHeadLogRequest)) + return nil +} + +func (x *DeleteReplicatorRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return interceptor(ctx, in, info, handler) + return nil } -var _Service_serviceDesc = grpc.ServiceDesc{ - ServiceName: "net.pb.Service", - HandlerType: (*ServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetDocGraph", - Handler: _Service_GetDocGraph_Handler, - }, - { - MethodName: "PushDocGraph", - Handler: _Service_PushDocGraph_Handler, - }, - { - MethodName: "GetLog", - Handler: _Service_GetLog_Handler, - }, - { - MethodName: "PushLog", - Handler: _Service_PushLog_Handler, - }, - { - MethodName: "GetHeadLog", - Handler: _Service_GetHeadLog_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "net.proto", +type DeleteReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` } -func (m *Document) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteReplicatorReply) Reset() { + *x = DeleteReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *Document) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Document) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Head != nil { - { - size := m.Head.Size() - i -= size - if _, err := m.Head.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.DocKey != nil { - { - size := m.DocKey.Size() - i -= size - if _, err := m.DocKey.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) +func (*DeleteReplicatorReply) 
ProtoMessage() {} + +func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0xa + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *Document_Log) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. +func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{14} } -func (m *Document_Log) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteReplicatorReply) GetPeerID() []byte { + if x != nil { + return x.PeerID + } + return nil } -func (m *Document_Log) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Block) > 0 { - i -= len(m.Block) - copy(dAtA[i:], m.Block) - i = encodeVarintNet(dAtA, i, uint64(len(m.Block))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +type GetAllReplicatorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorRequest) Reset() { + *x = GetAllReplicatorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetDocGraphRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetDocGraphRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*GetAllReplicatorRequest) ProtoMessage() {} -func (m *GetDocGraphReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *GetDocGraphReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{15} } -func (m *GetDocGraphReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type GetAllReplicatorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` } -func (m *PushDocGraphRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply) Reset() { + *x = GetAllReplicatorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PushDocGraphRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*GetAllReplicatorReply) ProtoMessage() {} -func (m *PushDocGraphReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16} } -func (m *PushDocGraphReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { + if x != nil { + return x.Replicators + } + return nil } -func (m *PushDocGraphReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type AddP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *GetLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsRequest) Reset() { + *x = AddP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*AddP2PCollectionsRequest) ProtoMessage() {} -func (m *GetLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) +} + +// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{17} } -func (m *GetLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections + } + return nil } -func (m *GetLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type AddP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *PushLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsReply) Reset() { + *x = AddP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PushLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *AddP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Body != nil { - { - size, err := m.Body.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNet(dAtA, i, uint64(size)) +func (*AddP2PCollectionsReply) ProtoMessage() {} + +func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0xa + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) +} + +// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. 
+func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{18} } -func (m *PushLogRequest_Body) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } - return dAtA[:n], nil + return "" } -func (m *PushLogRequest_Body) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type RemoveP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *PushLogRequest_Body) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Log != nil { - { - size, err := m.Log.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.Creator) > 0 { - i -= len(m.Creator) - copy(dAtA[i:], m.Creator) - i = encodeVarintNet(dAtA, i, uint64(len(m.Creator))) - i-- - dAtA[i] = 0x22 - } - if len(m.SchemaID) > 0 { - i -= len(m.SchemaID) - copy(dAtA[i:], m.SchemaID) - i = encodeVarintNet(dAtA, i, uint64(len(m.SchemaID))) - i-- - dAtA[i] = 0x1a - } - if m.Cid != nil { - { - size := m.Cid.Size() - i -= size - if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.DocKey != nil { - { - size := m.DocKey.Size() - i -= size - if _, err := m.DocKey.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintNet(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *RemoveP2PCollectionsRequest) Reset() { + *x = RemoveP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *GetHeadLogRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *RemoveP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (*RemoveP2PCollectionsRequest) ProtoMessage() {} + +func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetHeadLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{19} } -func (m *PushLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *RemoveP2PCollectionsRequest) GetCollections() []string { + if x != nil { + return x.Collections } - return dAtA[:n], nil + return nil } -func (m *PushLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type RemoveP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PushLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` } -func (m *GetHeadLogReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *RemoveP2PCollectionsReply) Reset() { + *x = RemoveP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetHeadLogReply) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *RemoveP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogReply) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} +func (*RemoveP2PCollectionsReply) ProtoMessage() {} -func encodeVarintNet(dAtA []byte, offset int, v uint64) int { - offset -= sovNet(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - dAtA[offset] = uint8(v) - return base + return mi.MessageOf(x) } -func (m *Document) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DocKey != nil { - l = m.DocKey.Size() - n += 1 + l + sovNet(uint64(l)) - } - if m.Head != nil { - l = m.Head.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n + +// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. 
+func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{20} } -func (m *Document_Log) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Block) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) +func (x *RemoveP2PCollectionsReply) GetErr() string { + if x != nil { + return x.Err } - return n + return "" } -func (m *GetDocGraphRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +type GetAllP2PCollectionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetDocGraphReply) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsRequest) Reset() { + *x = GetAllP2PCollectionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - return n } -func (m *PushDocGraphRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (x *GetAllP2PCollectionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PushDocGraphReply) Size() (n int) { - if m == nil { - return 0 +func (*GetAllP2PCollectionsRequest) ProtoMessage() {} + +func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - var l int - _ = l - return n + return mi.MessageOf(x) } -func (m *GetLogRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{21} } -func (m *GetLogReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +type GetAllP2PCollectionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` } -func (m *PushLogRequest) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsReply) Reset() { + *x = GetAllP2PCollectionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - if m.Body != nil { - l = m.Body.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n } -func (m *PushLogRequest_Body) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DocKey != nil { - l = m.DocKey.Size() - n += 1 + l + sovNet(uint64(l)) - } - if m.Cid != nil { - l = m.Cid.Size() - n += 1 + l + sovNet(uint64(l)) - } - l = len(m.SchemaID) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) - } - l = len(m.Creator) - if l > 0 { - n += 1 + l + sovNet(uint64(l)) - } - if m.Log != nil { - l = m.Log.Size() - n += 1 + l + sovNet(uint64(l)) - } - return n +func (x *GetAllP2PCollectionsReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetHeadLogRequest) Size() (n int) { - if m == nil { - return 0 +func (*GetAllP2PCollectionsReply) ProtoMessage() {} + +func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - var l int - _ = l - return n + return mi.MessageOf(x) } -func (m *PushLogReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. +func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{22} } -func (m *GetHeadLogReply) Size() (n int) { - if m == nil { - return 0 +func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { + if x != nil { + return x.Collections } - var l int - _ = l - return n + return nil } -func sovNet(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +// Record is a thread record containing link data. +type Document_Log struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // block is the top-level node's raw data as an ipld.Block. 
+ Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` } -func sozNet(x uint64) (n int) { - return sovNet(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (x *Document_Log) Reset() { + *x = Document_Log{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Document) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoDocKey - m.DocKey = &v - if err := m.DocKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoCid - m.Head = &v - if err := m.Head.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *Document_Log) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document_Log) ProtoMessage() {} + +func (x *Document_Log) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} - if iNdEx > l { - return io.ErrUnexpectedEOF +// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. 
+func (*Document_Log) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Document_Log) GetBlock() []byte { + if x != nil { + return x.Block } return nil } -func (m *Document_Log) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) - if m.Block == nil { - m.Block = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type PushLogRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // docKey is the DocKey of the document that is affected by the log. + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` + // cid is the CID of the composite of the document. + Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` + // schemaID is the SchemaID of the collection that the document resides in. + SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` + // creator is the PeerID of the peer that created the log. + Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` + // log hold the block that represent version of the document. 
+ Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` +} + +func (x *PushLogRequest_Body) Reset() { + *x = PushLogRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetDocGraphRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *PushLogRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushLogRequest_Body) ProtoMessage() {} + +func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. 
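The regenerated PushLogRequest_Body carries docKey, cid, and schemaID as plain byte slices (the gogo ProtoDocKey/ProtoCid custom types are gone), so callers marshal those values to bytes themselves. A minimal, hypothetical sketch of filling the body and pushing it through the regenerated client; the import path, dial address, field values, and the Body field name on PushLogRequest are placeholder assumptions rather than anything defined by this patch:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "github.com/sourcenetwork/defradb/net/pb" // assumed import path for the generated net_pb package
)

func main() {
	// Placeholder endpoint; a real caller would dial the remote peer's gRPC address.
	conn, err := grpc.Dial("localhost:9161", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewServiceClient(conn)

	// The byte slices below are placeholders: callers now serialize the DocKey,
	// CID, and schema ID to bytes themselves before building the request.
	req := &pb.PushLogRequest{
		Body: &pb.PushLogRequest_Body{
			DocKey:   []byte("bae-example-dockey"),
			Cid:      []byte("example-cid-bytes"),
			SchemaID: []byte("example-schema-id"),
			Creator:  "example-peer-id",
			Log:      &pb.Document_Log{Block: []byte("raw-ipld-block")},
		},
	}
	if _, err := client.PushLog(context.Background(), req); err != nil {
		log.Fatal(err)
	}
}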
+func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetDocKey() []byte { + if x != nil { + return x.DocKey } return nil } -func (m *GetDocGraphReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetCid() []byte { + if x != nil { + return x.Cid } return nil } -func (m *PushDocGraphRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetSchemaID() []byte { + if x != nil { + return x.SchemaID } return nil } -func (m *PushDocGraphReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + +func (x *PushLogRequest_Body) GetCreator() string { + if x != nil { + return x.Creator } + return "" +} - if iNdEx > l { - return 
io.ErrUnexpectedEOF +func (x *PushLogRequest_Body) GetLog() *Document_Log { + if x != nil { + return x.Log } return nil } -func (m *GetLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type GetAllReplicatorReply_Replicators struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` +} + +func (x *GetAllReplicatorReply_Replicators) Reset() { + *x = GetAllReplicatorReply_Replicators{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *GetAllReplicatorReply_Replicators) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} + +func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. 
+func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { + if x != nil { + return x.Info } return nil } -func (m *PushLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Body == nil { - m.Body = &PushLogRequest_Body{} - } - if err := m.Body.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { + if x != nil { + return x.Schemas } return nil } -func (m *PushLogRequest_Body) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Body: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Body: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoDocKey - m.DocKey = &v - if err := m.DocKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var v ProtoCid - m.Cid = &v - if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) - if m.SchemaID == nil { - m.SchemaID = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNet - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNet - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Log == nil { - m.Log = &Document_Log{} - } - if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +type GetAllReplicatorReply_Replicators_Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` +} + +func (x *GetAllReplicatorReply_Replicators_Info) Reset() { + *x = GetAllReplicatorReply_Replicators_Info{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[26] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *GetHeadLogRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +func (x *GetAllReplicatorReply_Replicators_Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} + +func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. +func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{16, 0, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { + if x != nil { + return x.Id } return nil } -func (m *PushLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { + if x != nil { + return x.Addrs } return nil } -func (m *GetHeadLogReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNet - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogReply: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipNet(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNet - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + +type GetAllP2PCollectionsReply_Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetAllP2PCollectionsReply_Collection) Reset() { + *x = GetAllP2PCollectionsReply_Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_net_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAllP2PCollectionsReply_Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} + +func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. +func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{22, 0} +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *GetAllP2PCollectionsReply_Collection) GetId() string { + if x != nil { + return x.Id } - return nil + return "" } -func skipNet(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNet - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNet - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNet - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNet - } - if depth == 0 { - return iNdEx, nil - } + +func (x *GetAllP2PCollectionsReply_Collection) GetName() string { + if x != nil { + return x.Name } - return 0, io.ErrUnexpectedEOF + return "" +} + +var File_net_proto protoreflect.FileDescriptor + +var file_net_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x6e, 0x65, 0x74, + 0x2e, 
0x70, 0x62, 0x22, 0x53, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x1a, 0x1b, 0x0a, 0x03, 0x4c, + 0x6f, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, + 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x75, 0x73, + 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x0f, + 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x0d, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0xd2, + 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, + 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, + 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x6c, + 0x6f, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x03, + 0x6c, 0x6f, 0x67, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x4c, 0x0a, 0x14, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x65, 
0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x53, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2f, 0x0a, 0x15, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, + 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, + 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, + 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, + 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, + 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, + 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xd1, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, + 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, + 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x12, 0x36, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, + 0x68, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, + 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, + 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x17, 0x2e, + 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x32, 0xa3, 0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, + 0x5a, 
0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( - ErrInvalidLengthNet = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNet = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNet = fmt.Errorf("proto: unexpected end of group") + file_net_proto_rawDescOnce sync.Once + file_net_proto_rawDescData = file_net_proto_rawDesc ) + +func file_net_proto_rawDescGZIP() []byte { + file_net_proto_rawDescOnce.Do(func() { + file_net_proto_rawDescData = protoimpl.X.CompressGZIP(file_net_proto_rawDescData) + }) + return file_net_proto_rawDescData +} + +var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_net_proto_goTypes = []interface{}{ + (*Document)(nil), // 0: net.pb.Document + (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest + (*GetDocGraphReply)(nil), // 2: net.pb.GetDocGraphReply + (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest + (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply + (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest + (*GetLogReply)(nil), // 6: net.pb.GetLogReply + (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest + (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest + (*PushLogReply)(nil), // 9: net.pb.PushLogReply + (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply + (*SetReplicatorRequest)(nil), // 11: net.pb.SetReplicatorRequest + (*SetReplicatorReply)(nil), // 12: net.pb.SetReplicatorReply + (*DeleteReplicatorRequest)(nil), // 13: net.pb.DeleteReplicatorRequest + (*DeleteReplicatorReply)(nil), // 14: net.pb.DeleteReplicatorReply + (*GetAllReplicatorRequest)(nil), // 15: net.pb.GetAllReplicatorRequest + (*GetAllReplicatorReply)(nil), // 16: net.pb.GetAllReplicatorReply + (*AddP2PCollectionsRequest)(nil), // 17: net.pb.AddP2PCollectionsRequest + (*AddP2PCollectionsReply)(nil), // 18: net.pb.AddP2PCollectionsReply + (*RemoveP2PCollectionsRequest)(nil), // 19: net.pb.RemoveP2PCollectionsRequest + (*RemoveP2PCollectionsReply)(nil), // 20: net.pb.RemoveP2PCollectionsReply + (*GetAllP2PCollectionsRequest)(nil), // 21: net.pb.GetAllP2PCollectionsRequest + (*GetAllP2PCollectionsReply)(nil), // 22: net.pb.GetAllP2PCollectionsReply + (*Document_Log)(nil), // 23: net.pb.Document.Log + (*PushLogRequest_Body)(nil), // 24: net.pb.PushLogRequest.Body + (*GetAllReplicatorReply_Replicators)(nil), // 25: net.pb.GetAllReplicatorReply.Replicators + (*GetAllReplicatorReply_Replicators_Info)(nil), // 26: net.pb.GetAllReplicatorReply.Replicators.Info + (*GetAllP2PCollectionsReply_Collection)(nil), // 27: net.pb.GetAllP2PCollectionsReply.Collection +} +var file_net_proto_depIdxs = []int32{ + 24, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body + 25, // 1: net.pb.GetAllReplicatorReply.replicators:type_name -> net.pb.GetAllReplicatorReply.Replicators + 27, // 2: net.pb.GetAllP2PCollectionsReply.collections:type_name -> net.pb.GetAllP2PCollectionsReply.Collection + 23, // 3: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log + 26, // 4: net.pb.GetAllReplicatorReply.Replicators.info:type_name -> net.pb.GetAllReplicatorReply.Replicators.Info + 1, // 5: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest + 3, // 6: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest + 5, // 7: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest + 7, // 8: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest + 8, // 9: net.pb.Service.GetHeadLog:input_type -> 
net.pb.GetHeadLogRequest + 11, // 10: net.pb.Collection.SetReplicator:input_type -> net.pb.SetReplicatorRequest + 13, // 11: net.pb.Collection.DeleteReplicator:input_type -> net.pb.DeleteReplicatorRequest + 15, // 12: net.pb.Collection.GetAllReplicators:input_type -> net.pb.GetAllReplicatorRequest + 17, // 13: net.pb.Collection.AddP2PCollections:input_type -> net.pb.AddP2PCollectionsRequest + 19, // 14: net.pb.Collection.RemoveP2PCollections:input_type -> net.pb.RemoveP2PCollectionsRequest + 21, // 15: net.pb.Collection.GetAllP2PCollections:input_type -> net.pb.GetAllP2PCollectionsRequest + 2, // 16: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply + 4, // 17: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply + 6, // 18: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply + 9, // 19: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply + 10, // 20: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply + 12, // 21: net.pb.Collection.SetReplicator:output_type -> net.pb.SetReplicatorReply + 14, // 22: net.pb.Collection.DeleteReplicator:output_type -> net.pb.DeleteReplicatorReply + 16, // 23: net.pb.Collection.GetAllReplicators:output_type -> net.pb.GetAllReplicatorReply + 18, // 24: net.pb.Collection.AddP2PCollections:output_type -> net.pb.AddP2PCollectionsReply + 20, // 25: net.pb.Collection.RemoveP2PCollections:output_type -> net.pb.RemoveP2PCollectionsReply + 22, // 26: net.pb.Collection.GetAllP2PCollections:output_type -> net.pb.GetAllP2PCollectionsReply + 16, // [16:27] is the sub-list for method output_type + 5, // [5:16] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_net_proto_init() } +func file_net_proto_init() { + if File_net_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_net_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDocGraphRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDocGraphReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushDocGraphRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushDocGraphReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadLogReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RemoveP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document_Log); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushLogRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_net_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAllP2PCollectionsReply_Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_net_proto_rawDesc, + NumEnums: 0, + NumMessages: 28, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_net_proto_goTypes, + DependencyIndexes: file_net_proto_depIdxs, + MessageInfos: file_net_proto_msgTypes, + }.Build() + File_net_proto = out.File + file_net_proto_rawDesc = nil + file_net_proto_goTypes = nil + file_net_proto_depIdxs = nil +} diff --git a/net/pb/net.proto b/net/pb/net.proto index ae69b2c47d..a4799a1d89 100644 --- a/net/pb/net.proto +++ b/net/pb/net.proto @@ -1,14 +1,14 @@ syntax = "proto3"; package net.pb; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +option go_package = "/;net_pb"; // Log represents a thread log. message Document { // ID of the document. - bytes docKey = 1 [(gogoproto.customtype) = "ProtoDocKey"]; + bytes docKey = 1; // head of the log. - bytes head = 4 [(gogoproto.customtype) = "ProtoCid"]; + bytes head = 4; // Record is a thread record containing link data. message Log { @@ -34,15 +34,15 @@ message PushLogRequest { message Body { // docKey is the DocKey of the document that is affected by the log. - bytes docKey = 1 [(gogoproto.customtype) = "ProtoDocKey"]; + bytes docKey = 1; // cid is the CID of the composite of the document. - bytes cid = 2 [(gogoproto.customtype) = "ProtoCid"]; + bytes cid = 2; // schemaID is the SchemaID of the collection that the document resides in. 
bytes schemaID = 3; // creator is the PeerID of the peer that created the log. string creator = 4; // log hold the block that represent version of the document. - Document.Log log = 5; + Document.Log log = 6; } } @@ -64,4 +64,83 @@ service Service { rpc PushLog(PushLogRequest) returns (PushLogReply) {} // GetHeadLog from this peer rpc GetHeadLog(GetHeadLogRequest) returns (GetHeadLogReply) {} +} + +message SetReplicatorRequest { + repeated string collections = 1; + bytes addr = 2; +} + +message SetReplicatorReply { + bytes peerID = 1; +} + +message DeleteReplicatorRequest { + bytes peerID = 1; + repeated string collections = 2; +} + +message DeleteReplicatorReply { + bytes peerID = 1; +} + +message GetAllReplicatorRequest {} + +message GetAllReplicatorReply { + message Replicators { + message Info { + bytes id = 1; + bytes addrs = 2; + } + Info info = 1; + repeated string schemas = 2; + } + + repeated Replicators replicators = 1; + +} + +message AddP2PCollectionsRequest { + repeated string collections = 1; +} + +message AddP2PCollectionsReply { + string err = 1; +} + +message RemoveP2PCollectionsRequest { + repeated string collections = 1; +} + +message RemoveP2PCollectionsReply { + string err = 1; +} + +message GetAllP2PCollectionsRequest {} + +message GetAllP2PCollectionsReply { + message Collection { + string id = 1; + string name = 2; + } + repeated Collection collections = 1; +} + + +// Collection is the peer-to-peer network API for document sync by replication and subscription to collections +service Collection { + // SetReplicator for this peer + rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {} + + // DeleteReplicator for this peer + rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {} + + // DeleteReplicator for this peer + rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {} + + rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {} + + rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {} + + rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {} } \ No newline at end of file diff --git a/net/pb/net_grpc.pb.go b/net/pb/net_grpc.pb.go new file mode 100644 index 0000000000..e50cbec859 --- /dev/null +++ b/net/pb/net_grpc.pb.go @@ -0,0 +1,548 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.9 +// source: net.proto + +package net_pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Service_GetDocGraph_FullMethodName = "/net.pb.Service/GetDocGraph" + Service_PushDocGraph_FullMethodName = "/net.pb.Service/PushDocGraph" + Service_GetLog_FullMethodName = "/net.pb.Service/GetLog" + Service_PushLog_FullMethodName = "/net.pb.Service/PushLog" + Service_GetHeadLog_FullMethodName = "/net.pb.Service/GetHeadLog" +) + +// ServiceClient is the client API for Service service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServiceClient interface { + // GetDocGraph from this peer. 
+ GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) + // PushDocGraph to this peer. + PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) + // GetLog from this peer. + GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) + // PushLog to this peer. + PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) + // GetHeadLog from this peer + GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) +} + +type serviceClient struct { + cc grpc.ClientConnInterface +} + +func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient { + return &serviceClient{cc} +} + +func (c *serviceClient) GetDocGraph(ctx context.Context, in *GetDocGraphRequest, opts ...grpc.CallOption) (*GetDocGraphReply, error) { + out := new(GetDocGraphReply) + err := c.cc.Invoke(ctx, Service_GetDocGraph_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) PushDocGraph(ctx context.Context, in *PushDocGraphRequest, opts ...grpc.CallOption) (*PushDocGraphReply, error) { + out := new(PushDocGraphReply) + err := c.cc.Invoke(ctx, Service_PushDocGraph_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetLog(ctx context.Context, in *GetLogRequest, opts ...grpc.CallOption) (*GetLogReply, error) { + out := new(GetLogReply) + err := c.cc.Invoke(ctx, Service_GetLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) PushLog(ctx context.Context, in *PushLogRequest, opts ...grpc.CallOption) (*PushLogReply, error) { + out := new(PushLogReply) + err := c.cc.Invoke(ctx, Service_PushLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *serviceClient) GetHeadLog(ctx context.Context, in *GetHeadLogRequest, opts ...grpc.CallOption) (*GetHeadLogReply, error) { + out := new(GetHeadLogReply) + err := c.cc.Invoke(ctx, Service_GetHeadLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ServiceServer is the server API for Service service. +// All implementations must embed UnimplementedServiceServer +// for forward compatibility +type ServiceServer interface { + // GetDocGraph from this peer. + GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) + // PushDocGraph to this peer. + PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) + // GetLog from this peer. + GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) + // PushLog to this peer. + PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) + // GetHeadLog from this peer + GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) + mustEmbedUnimplementedServiceServer() +} + +// UnimplementedServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedServiceServer struct { +} + +func (UnimplementedServiceServer) GetDocGraph(context.Context, *GetDocGraphRequest) (*GetDocGraphReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDocGraph not implemented") +} +func (UnimplementedServiceServer) PushDocGraph(context.Context, *PushDocGraphRequest) (*PushDocGraphReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PushDocGraph not implemented") +} +func (UnimplementedServiceServer) GetLog(context.Context, *GetLogRequest) (*GetLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLog not implemented") +} +func (UnimplementedServiceServer) PushLog(context.Context, *PushLogRequest) (*PushLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PushLog not implemented") +} +func (UnimplementedServiceServer) GetHeadLog(context.Context, *GetHeadLogRequest) (*GetHeadLogReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHeadLog not implemented") +} +func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {} + +// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServiceServer will +// result in compilation errors. +type UnsafeServiceServer interface { + mustEmbedUnimplementedServiceServer() +} + +func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { + s.RegisterService(&Service_ServiceDesc, srv) +} + +func _Service_GetDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDocGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetDocGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetDocGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetDocGraph(ctx, req.(*GetDocGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_PushDocGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushDocGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).PushDocGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_PushDocGraph_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).PushDocGraph(ctx, req.(*PushDocGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetLog(ctx, req.(*GetLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_PushLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).PushLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_PushLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).PushLog(ctx, req.(*PushLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Service_GetHeadLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHeadLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ServiceServer).GetHeadLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Service_GetHeadLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ServiceServer).GetHeadLog(ctx, req.(*GetHeadLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Service_ServiceDesc is the grpc.ServiceDesc for Service service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Service_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "net.pb.Service", + HandlerType: (*ServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetDocGraph", + Handler: _Service_GetDocGraph_Handler, + }, + { + MethodName: "PushDocGraph", + Handler: _Service_PushDocGraph_Handler, + }, + { + MethodName: "GetLog", + Handler: _Service_GetLog_Handler, + }, + { + MethodName: "PushLog", + Handler: _Service_PushLog_Handler, + }, + { + MethodName: "GetHeadLog", + Handler: _Service_GetHeadLog_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "net.proto", +} + +const ( + Collection_SetReplicator_FullMethodName = "/net.pb.Collection/SetReplicator" + Collection_DeleteReplicator_FullMethodName = "/net.pb.Collection/DeleteReplicator" + Collection_GetAllReplicators_FullMethodName = "/net.pb.Collection/GetAllReplicators" + Collection_AddP2PCollections_FullMethodName = "/net.pb.Collection/AddP2PCollections" + Collection_RemoveP2PCollections_FullMethodName = "/net.pb.Collection/RemoveP2PCollections" + Collection_GetAllP2PCollections_FullMethodName = "/net.pb.Collection/GetAllP2PCollections" +) + +// CollectionClient is the client API for Collection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type CollectionClient interface { + // SetReplicator for this peer + SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) + // DeleteReplicator for this peer + GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) + AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) +} + +type collectionClient struct { + cc grpc.ClientConnInterface +} + +func NewCollectionClient(cc grpc.ClientConnInterface) CollectionClient { + return &collectionClient{cc} +} + +func (c *collectionClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { + out := new(SetReplicatorReply) + err := c.cc.Invoke(ctx, Collection_SetReplicator_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *collectionClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { + out := new(DeleteReplicatorReply) + err := c.cc.Invoke(ctx, Collection_DeleteReplicator_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *collectionClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { + out := new(GetAllReplicatorReply) + err := c.cc.Invoke(ctx, Collection_GetAllReplicators_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *collectionClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { + out := new(AddP2PCollectionsReply) + err := c.cc.Invoke(ctx, Collection_AddP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *collectionClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { + out := new(RemoveP2PCollectionsReply) + err := c.cc.Invoke(ctx, Collection_RemoveP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *collectionClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { + out := new(GetAllP2PCollectionsReply) + err := c.cc.Invoke(ctx, Collection_GetAllP2PCollections_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CollectionServer is the server API for Collection service. 
+// All implementations must embed UnimplementedCollectionServer +// for forward compatibility +type CollectionServer interface { + // SetReplicator for this peer + SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) + // DeleteReplicator for this peer + DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) + // DeleteReplicator for this peer + GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) + AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) + RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) + GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) + mustEmbedUnimplementedCollectionServer() +} + +// UnimplementedCollectionServer must be embedded to have forward compatible implementations. +type UnimplementedCollectionServer struct { +} + +func (UnimplementedCollectionServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") +} +func (UnimplementedCollectionServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") +} +func (UnimplementedCollectionServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") +} +func (UnimplementedCollectionServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") +} +func (UnimplementedCollectionServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") +} +func (UnimplementedCollectionServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") +} +func (UnimplementedCollectionServer) mustEmbedUnimplementedCollectionServer() {} + +// UnsafeCollectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CollectionServer will +// result in compilation errors. 
+type UnsafeCollectionServer interface { + mustEmbedUnimplementedCollectionServer() +} + +func RegisterCollectionServer(s grpc.ServiceRegistrar, srv CollectionServer) { + s.RegisterService(&Collection_ServiceDesc, srv) +} + +func _Collection_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).SetReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_SetReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).DeleteReplicator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_DeleteReplicator_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllReplicatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).GetAllReplicators(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_GetAllReplicators_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).AddP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_AddP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).RemoveP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_RemoveP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(CollectionServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Collection_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllP2PCollectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectionServer).GetAllP2PCollections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collection_GetAllP2PCollections_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectionServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Collection_ServiceDesc is the grpc.ServiceDesc for Collection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Collection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "net.pb.Collection", + HandlerType: (*CollectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SetReplicator", + Handler: _Collection_SetReplicator_Handler, + }, + { + MethodName: "DeleteReplicator", + Handler: _Collection_DeleteReplicator_Handler, + }, + { + MethodName: "GetAllReplicators", + Handler: _Collection_GetAllReplicators_Handler, + }, + { + MethodName: "AddP2PCollections", + Handler: _Collection_AddP2PCollections_Handler, + }, + { + MethodName: "RemoveP2PCollections", + Handler: _Collection_RemoveP2PCollections_Handler, + }, + { + MethodName: "GetAllP2PCollections", + Handler: _Collection_GetAllP2PCollections_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "net.proto", +} diff --git a/net/pb/net_vtproto.pb.go b/net/pb/net_vtproto.pb.go new file mode 100644 index 0000000000..9ac8b5c379 --- /dev/null +++ b/net/pb/net_vtproto.pb.go @@ -0,0 +1,3998 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: net.proto + +package net_pb + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Document_Log) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Document_Log) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Document_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Block) > 0 { + i -= len(m.Block) + copy(dAtA[i:], m.Block) + i = encodeVarint(dAtA, i, uint64(len(m.Block))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Document) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Document) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Document) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Head) > 0 { + i -= len(m.Head) + copy(dAtA[i:], m.Head) + i = encodeVarint(dAtA, i, uint64(len(m.Head))) + i-- + dAtA[i] = 0x22 + } + if len(m.DocKey) > 0 { + i -= len(m.DocKey) + copy(dAtA[i:], m.DocKey) + i = encodeVarint(dAtA, i, uint64(len(m.DocKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetDocGraphRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetDocGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetDocGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetDocGraphReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetDocGraphReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetDocGraphReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushDocGraphRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
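// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): the MarshalVT/UnmarshalVT
// pair generated in this file can be exercised with a plain round trip. The
// import path below is an assumption for illustration, and the byte values are
// placeholders rather than real DocKey or CID encodings.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	net_pb "github.com/sourcenetwork/defradb/net/pb" // assumed import path
)

func main() {
	// Encode a Document using the reflection-free vtproto fast path.
	in := &net_pb.Document{
		DocKey: []byte("example-dockey"),
		Head:   []byte("example-head"),
	}
	buf, err := in.MarshalVT()
	if err != nil {
		panic(err)
	}

	// Decode into a fresh message and confirm the fields survive the trip.
	out := &net_pb.Document{}
	if err := out.UnmarshalVT(buf); err != nil {
		panic(err)
	}
	fmt.Printf("dockey=%q head=%q\n", out.DocKey, out.Head)
}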
+func (m *PushDocGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushDocGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushDocGraphReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushDocGraphReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushDocGraphReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushLogRequest_Body) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogRequest_Body) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogRequest_Body) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Log != nil { + size, err := m.Log.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarint(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0x22 + } + if 
len(m.SchemaID) > 0 { + i -= len(m.SchemaID) + copy(dAtA[i:], m.SchemaID) + i = encodeVarint(dAtA, i, uint64(len(m.SchemaID))) + i-- + dAtA[i] = 0x1a + } + if len(m.Cid) > 0 { + i -= len(m.Cid) + copy(dAtA[i:], m.Cid) + i = encodeVarint(dAtA, i, uint64(len(m.Cid))) + i-- + dAtA[i] = 0x12 + } + if len(m.DocKey) > 0 { + i -= len(m.DocKey) + copy(dAtA[i:], m.DocKey) + i = encodeVarint(dAtA, i, uint64(len(m.DocKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PushLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Body != nil { + size, err := m.Body.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetHeadLogRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetHeadLogRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetHeadLogRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *PushLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PushLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetHeadLogReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetHeadLogReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetHeadLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + 
return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addr) > 0 { + i -= len(m.Addr) + copy(dAtA[i:], m.Addr) + i = encodeVarint(dAtA, i, uint64(len(m.Addr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } 
+ i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Addrs) > 0 { + i -= len(m.Addrs) + copy(dAtA[i:], m.Addrs) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Schemas) > 0 { + for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Schemas[iNdEx]) + copy(dAtA[i:], m.Schemas[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Info != nil { + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
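// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): the single-byte constants
// written by the marshalers above (0xa, 0x12, 0x22, 0x32, ...) are protobuf
// field keys: (field number << 3) | wire type, where wire type 2 marks a
// length-delimited field (bytes, string, nested message). A tiny standalone
// illustration of that arithmetic follows.
// ---------------------------------------------------------------------------
package main

import "fmt"

// key returns the one-byte protobuf field key for small field numbers.
func key(fieldNumber, wireType byte) byte {
	return fieldNumber<<3 | wireType
}

func main() {
	const lengthDelimited = 2
	fmt.Printf("field 1: %#x\n", key(1, lengthDelimited)) // 0xa  -> e.g. Replicators_Info.id
	fmt.Printf("field 2: %#x\n", key(2, lengthDelimited)) // 0x12 -> e.g. Replicators_Info.addrs
	fmt.Printf("field 4: %#x\n", key(4, lengthDelimited)) // 0x22 -> e.g. Document.head
	fmt.Printf("field 6: %#x\n", key(6, lengthDelimited)) // 0x32 -> e.g. PushLogRequest.Body.log
}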
+ +func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Replicators) > 0 { + for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Collections[iNdEx]) + copy(dAtA[i:], m.Collections[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + 
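// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): every length prefix these
// marshalers write goes through encodeVarint/sov, defined further down in this
// file. Lengths are base-128 varints, so values below 128 cost one byte,
// values below 16384 cost two, and so on. A minimal standalone copy of the
// same sizing rule, for illustration only.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the generated sov helper: the number of 7-bit groups
// needed to encode x, never fewer than one byte.
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 127, 128, 300, 16383, 16384} {
		fmt.Printf("varint(%d) takes %d byte(s)\n", x, varintSize(x))
	}
}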
+func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Err) > 0 { + i -= len(m.Err) + copy(dAtA[i:], m.Err) + i = encodeVarint(dAtA, i, uint64(len(m.Err))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Collections) > 0 { + for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Document_Log) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Block) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Document) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Head) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetDocGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetDocGraphReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushDocGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushDocGraphReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogRequest_Body) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SchemaID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Creator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Log != nil { + l = m.Log.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PushLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Body != nil { + l = m.Body.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = 
len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Addrs) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Schemas) > 0 { + for _, s := range m.Schemas { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllReplicatorReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Replicators) > 0 { + for _, e := range m.Replicators { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, s := range m.Collections { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Err) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Collections) > 0 { + for _, e := range m.Collections { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Document_Log) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) + if m.Block == nil { + m.Block = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Document) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Document: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Head == nil { + m.Head = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) + if m.SchemaID == nil { + m.SchemaID = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Log == nil { + m.Log = &Document_Log{} + } + if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Body == nil { + m.Body = &PushLogRequest_Body{} + } + if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) + if m.Addr == nil { + m.Addr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) + if m.PeerID == nil { + m.PeerID = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) + if m.Id == nil { + m.Id = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Addrs == nil { + m.Addrs = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &GetAllReplicatorReply_Replicators_Info{} + } + if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) + if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) + if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/net/peer.go b/net/peer.go index bb3e11c420..26a24a38ae 100644 --- a/net/peer.go +++ b/net/peer.go @@ -34,6 +34,8 @@ import ( "github.com/libp2p/go-libp2p/core/routing" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -63,7 +65,7 @@ type Peer struct { ps *pubsub.PubSub server *server - p2pRPC *grpc.Server // rpc server over the p2p network + p2pRPC *grpc.Server // rpc server over the P2P network // Used to close the dagWorker pool for a given document. // The string represents a dockey. @@ -84,6 +86,8 @@ type Peer struct { ctx context.Context cancel context.CancelFunc + + pb.UnimplementedCollectionServer } // NewPeer creates a new instance of the DefraDB server as a peer-to-peer node. 
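Every generated `UnmarshalVT` method above repeats the same wire-format loop: decode a base-128 varint tag, split it into a field number (`tag >> 3`) and a wire type (`tag & 0x7`), and for wire type 2 (length-delimited) decode a varint length and copy that many bytes into the target field; unrecognised fields are measured with `skip` and appended to `unknownFields` so they survive a round trip. The sketch below is illustrative only — `readUvarint` is a hypothetical helper, since the generated code inlines this loop for speed — but it walks the same decoding steps over a tiny hand-built message.

```go
package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes a base-128 varint starting at data[i] and returns the
// value plus the index just past it. The generated UnmarshalVT methods above
// inline this exact loop rather than calling a helper.
func readUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Field 1, wire type 2 (length-delimited), carrying the 3 bytes "abc":
	// tag = (1<<3)|2 = 0x0A, then a varint length, then the payload.
	msg := []byte{0x0A, 0x03, 'a', 'b', 'c'}

	tag, i, _ := readUvarint(msg, 0)
	fieldNum, wireType := tag>>3, tag&0x7

	length, i, _ := readUvarint(msg, i)
	payload := msg[i : i+int(length)]

	fmt.Println(fieldNum, wireType, string(payload)) // prints: 1 2 abc
}
```

The `net/peer.go` hunk just above also embeds `pb.UnimplementedCollectionServer` in `Peer`; that is the usual grpc-go forward-compatibility pattern, giving `Peer` default implementations of any Collection service methods it does not override.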
@@ -98,7 +102,7 @@ func NewPeer( dialOptions []grpc.DialOption, ) (*Peer, error) { if db == nil { - return nil, errors.New("database object can't be empty") + return nil, ErrNilDB } ctx, cancel := context.WithCancel(ctx) @@ -167,7 +171,7 @@ func (p *Peer) Start() error { if p.ps != nil { if !p.db.Events().Updates.HasValue() { - return errors.New("tried to subscribe to update channel, but update channel is nil") + return ErrNilUpdateChannel } updateChannel, err := p.db.Events().Updates.Value().Subscribe() @@ -180,7 +184,7 @@ func (p *Peer) Start() error { go p.handleBroadcastLoop() } - // register the p2p gRPC server + // register the P2P gRPC server go func() { pb.RegisterServiceServer(p.p2pRPC, p.server) if err := p.p2pRPC.Serve(p2plistener); err != nil && @@ -294,8 +298,8 @@ func (p *Peer) RegisterNewDocument( // publish log body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: c}, + DocKey: []byte(dockey.String()), + Cid: c.Bytes(), SchemaID: []byte(schemaID), Creator: p.host.ID().String(), Log: &pb.Document_Log{ @@ -309,25 +313,36 @@ func (p *Peer) RegisterNewDocument( return p.server.publishLog(p.ctx, schemaID, req) } +func marshalPeerID(id peer.ID) []byte { + b, _ := id.Marshal() // This will never return an error + return b +} + // SetReplicator adds a target peer node as a replication destination for documents in our DB. func (p *Peer) SetReplicator( ctx context.Context, - paddr ma.Multiaddr, - collectionNames ...string, -) (peer.ID, error) { + req *pb.SetReplicatorRequest, +) (*pb.SetReplicatorReply, error) { + addr, err := ma.NewMultiaddrBytes(req.Addr) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + txn, err := p.db.NewTxn(ctx, true) if err != nil { - return "", err + return nil, err } store := p.db.WithTxn(txn) - pid, err := p.setReplicator(ctx, store, paddr, collectionNames...) + pid, err := p.setReplicator(ctx, store, addr, req.Collections...) if err != nil { txn.Discard(ctx) - return "", err + return nil, err } - return pid, txn.Commit(ctx) + return &pb.SetReplicatorReply{ + PeerID: marshalPeerID(pid), + }, txn.Commit(ctx) } // setReplicator adds a target peer node as a replication destination for documents in our DB. @@ -511,28 +526,30 @@ func (p *Peer) pushToReplicator( } } -// DeleteReplicator adds a target peer node as a replication destination for documents in our DB. +// DeleteReplicator removes a peer node from the replicators. func (p *Peer) DeleteReplicator( ctx context.Context, - pid peer.ID, - collectionNames ...string, -) error { + req *pb.DeleteReplicatorRequest, +) (*pb.DeleteReplicatorReply, error) { + log.Debug(ctx, "Received DeleteReplicator request") + txn, err := p.db.NewTxn(ctx, true) if err != nil { - return err + return nil, err } store := p.db.WithTxn(txn) - err = p.deleteReplicator(ctx, store, pid, collectionNames...) + err = p.deleteReplicator(ctx, store, peer.ID(req.PeerID), req.Collections...) if err != nil { txn.Discard(ctx) - return err + return nil, err } - return txn.Commit(ctx) + return &pb.DeleteReplicatorReply{ + PeerID: req.PeerID, + }, txn.Commit(ctx) } -// DeleteReplicator adds a target peer node as a replication destination for documents in our DB. 
func (p *Peer) deleteReplicator( ctx context.Context, store client.Store, @@ -541,7 +558,7 @@ func (p *Peer) deleteReplicator( ) error { // make sure it's not ourselves if pid == p.host.ID() { - return errors.New("can't target ourselves as a replicator") + return ErrSelfTargetForReplicator } // verify collections @@ -595,9 +612,32 @@ func (p *Peer) deleteReplicator( }) } -// GetAllReplicators adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - return p.db.GetAllReplicators(ctx) +// GetAllReplicators returns all replicators and the schemas that are replicated to them. +func (p *Peer) GetAllReplicators( + ctx context.Context, + req *pb.GetAllReplicatorRequest, +) (*pb.GetAllReplicatorReply, error) { + log.Debug(ctx, "Received GetAllReplicators request") + + reps, err := p.db.GetAllReplicators(ctx) + if err != nil { + return nil, err + } + + pbReps := []*pb.GetAllReplicatorReply_Replicators{} + for _, rep := range reps { + pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ + Info: &pb.GetAllReplicatorReply_Replicators_Info{ + Id: []byte(rep.Info.ID), + Addrs: rep.Info.Addrs[0].Bytes(), + }, + Schemas: rep.Schemas, + }) + } + + return &pb.GetAllReplicatorReply{ + Replicators: pbReps, + }, nil } func (p *Peer) loadReplicators(ctx context.Context) error { @@ -651,7 +691,7 @@ func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, err func (p *Peer) handleDocCreateLog(evt events.Update) error { dockey, err := client.NewDocKeyFromString(evt.DocKey) if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) + return NewErrFailedToGetDockey(err) } // We need to register the document before pushing to the replicators if we want to @@ -669,7 +709,7 @@ func (p *Peer) handleDocCreateLog(evt events.Update) error { func (p *Peer) handleDocUpdateLog(evt events.Update) error { dockey, err := client.NewDocKeyFromString(evt.DocKey) if err != nil { - return errors.Wrap("failed to get DocKey from broadcast message", err) + return NewErrFailedToGetDockey(err) } log.Debug( p.ctx, @@ -679,8 +719,8 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { logging.NewKV("SchemaId", evt.SchemaID)) body := &pb.PushLogRequest_Body{ - DocKey: &pb.ProtoDocKey{DocKey: dockey}, - Cid: &pb.ProtoCid{Cid: evt.Cid}, + DocKey: []byte(dockey.String()), + Cid: evt.Cid.Bytes(), SchemaID: []byte(evt.SchemaID), Creator: p.host.ID().String(), Log: &pb.Document_Log{ @@ -695,11 +735,11 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { p.pushLogToReplicators(p.ctx, evt) if err := p.server.publishLog(p.ctx, evt.DocKey, req); err != nil { - return errors.Wrap(fmt.Sprintf("can't publish log %s for dockey %s", evt.Cid, evt.DocKey), err) + return NewErrPublishingToDockeyTopic(err, evt.Cid.String(), evt.DocKey) } if err := p.server.publishLog(p.ctx, evt.SchemaID, req); err != nil { - return errors.Wrap(fmt.Sprintf("can't publish log %s for schemaID %s", evt.Cid, evt.SchemaID), err) + return NewErrPublishingToSchemaTopic(err, evt.Cid.String(), evt.SchemaID) } return nil @@ -816,39 +856,44 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. 
-func (p *Peer) AddP2PCollections(collections []string) error { +func (p *Peer) AddP2PCollections( + ctx context.Context, + req *pb.AddP2PCollectionsRequest, +) (*pb.AddP2PCollectionsReply, error) { + log.Debug(ctx, "Received AddP2PCollections request") + txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return err + return nil, err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range collections { + for _, col := range req.Collections { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return err + return nil, err } storeCollections = append(storeCollections, storeCol) } // Ensure we can add all the collections to the store on the transaction // before adding to topics. - for _, col := range collections { + for _, col := range req.Collections { err := store.AddP2PCollection(p.ctx, col) if err != nil { - return err + return nil, err } } // Add pubsub topics and remove them if we get an error. addedTopics := []string{} - for _, col := range collections { + for _, col := range req.Collections { err = p.server.addPubSubTopic(col, true) if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) + return nil, p.rollbackAddPubSubTopics(addedTopics, err) } addedTopics = append(addedTopics, col) } @@ -859,12 +904,12 @@ func (p *Peer) AddP2PCollections(collections []string) error { for _, col := range storeCollections { keyChan, err := col.GetAllDocKeys(p.ctx) if err != nil { - return err + return nil, err } for key := range keyChan { err := p.server.removePubSubTopic(key.Key.String()) if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) + return nil, p.rollbackRemovePubSubTopics(removedTopics, err) } removedTopics = append(removedTopics, key.Key.String()) } @@ -872,10 +917,10 @@ func (p *Peer) AddP2PCollections(collections []string) error { if err = txn.Commit(p.ctx); err != nil { err = p.rollbackRemovePubSubTopics(removedTopics, err) - return p.rollbackAddPubSubTopics(addedTopics, err) + return nil, p.rollbackAddPubSubTopics(addedTopics, err) } - return nil + return &pb.AddP2PCollectionsReply{}, nil } // RemoveP2PCollections removes the given collectionIDs from the pubsup topics. @@ -884,39 +929,44 @@ func (p *Peer) AddP2PCollections(collections []string) error { // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) RemoveP2PCollections(collections []string) error { +func (p *Peer) RemoveP2PCollections( + ctx context.Context, + req *pb.RemoveP2PCollectionsRequest, +) (*pb.RemoveP2PCollectionsReply, error) { + log.Debug(ctx, "Received RemoveP2PCollections request") + txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return err + return nil, err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range collections { + for _, col := range req.Collections { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return err + return nil, err } storeCollections = append(storeCollections, storeCol) } // Ensure we can remove all the collections to the store on the transaction // before adding to topics. 
- for _, col := range collections { + for _, col := range req.Collections { err := store.RemoveP2PCollection(p.ctx, col) if err != nil { - return err + return nil, err } } // Remove pubsub topics and add them back if we get an error. removedTopics := []string{} - for _, col := range collections { + for _, col := range req.Collections { err = p.server.removePubSubTopic(col) if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) + return nil, p.rollbackRemovePubSubTopics(removedTopics, err) } removedTopics = append(removedTopics, col) } @@ -927,12 +977,12 @@ func (p *Peer) RemoveP2PCollections(collections []string) error { for _, col := range storeCollections { keyChan, err := col.GetAllDocKeys(p.ctx) if err != nil { - return err + return nil, err } for key := range keyChan { err := p.server.addPubSubTopic(key.Key.String(), true) if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) + return nil, p.rollbackAddPubSubTopics(addedTopics, err) } addedTopics = append(addedTopics, key.Key.String()) } @@ -940,38 +990,45 @@ func (p *Peer) RemoveP2PCollections(collections []string) error { if err = txn.Commit(p.ctx); err != nil { err = p.rollbackAddPubSubTopics(addedTopics, err) - return p.rollbackRemovePubSubTopics(removedTopics, err) + return nil, p.rollbackRemovePubSubTopics(removedTopics, err) } - return nil + return &pb.RemoveP2PCollectionsReply{}, nil } -// GetAllP2PCollections gets all the collectionIDs that have been added to the -// pubsub topics from the system store. -func (p *Peer) GetAllP2PCollections() ([]client.P2PCollection, error) { +// GetAllP2PCollections gets all the collectionIDs from the pubsup topics +func (p *Peer) GetAllP2PCollections( + ctx context.Context, + req *pb.GetAllP2PCollectionsRequest, +) (*pb.GetAllP2PCollectionsReply, error) { + log.Debug(ctx, "Received GetAllP2PCollections request") + txn, err := p.db.NewTxn(p.ctx, false) if err != nil { return nil, err } - defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) collections, err := p.db.GetAllP2PCollections(p.ctx) if err != nil { + txn.Discard(p.ctx) return nil, err } - p2pCols := []client.P2PCollection{} + pbCols := []*pb.GetAllP2PCollectionsReply_Collection{} for _, colID := range collections { col, err := store.GetCollectionBySchemaID(p.ctx, colID) if err != nil { + txn.Discard(p.ctx) return nil, err } - p2pCols = append(p2pCols, client.P2PCollection{ - ID: colID, + pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{ + Id: colID, Name: col.Name(), }) } - return p2pCols, txn.Commit(p.ctx) + return &pb.GetAllP2PCollectionsReply{ + Collections: pbCols, + }, txn.Commit(p.ctx) } diff --git a/net/peer_test.go b/net/peer_test.go new file mode 100644 index 0000000000..092e908cd2 --- /dev/null +++ b/net/peer_test.go @@ -0,0 +1,1182 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
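The P2P collection methods above all follow the same transactional shape: open a transaction, validate and stage the datastore changes, then mutate the pubsub topics, and only commit once both have succeeded — rolling the topic changes back if any later step (including the commit) fails. GetAllP2PCollections additionally switches from a deferred Discard to an explicit Discard on each error path. The sketch below shows that shape with hypothetical `txn` and `topics` types standing in for DefraDB's real transaction and pubsub APIs.

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the real transaction and pubsub-topic types;
// the names are illustrative, not DefraDB's actual API.
type txn struct{ committed bool }

func (t *txn) Commit() error { t.committed = true; return nil }
func (t *txn) Discard()      {}

type topics struct{ added []string }

func (tp *topics) add(name string) error    { tp.added = append(tp.added, name); return nil }
func (tp *topics) remove(name string) error { return nil }

// addCollections mirrors the shape of AddP2PCollections: stage the datastore
// writes inside a transaction, then touch the pubsub topics, and undo the
// topic changes if anything after them fails.
func addCollections(t *txn, tp *topics, collections []string) error {
	defer t.Discard() // harmless after a successful Commit

	// 1. Validate and stage the datastore changes first, so a bad collection
	//    ID fails before any topic has been created.
	for _, col := range collections {
		if col == "" {
			return errors.New("unknown collection")
		}
	}

	// 2. Create the topics, remembering what was added so it can be undone.
	addedTopics := []string{}
	for _, col := range collections {
		if err := tp.add(col); err != nil {
			return rollback(tp, addedTopics, err)
		}
		addedTopics = append(addedTopics, col)
	}

	// 3. Commit last; if the commit itself fails, roll the topics back too.
	if err := t.Commit(); err != nil {
		return rollback(tp, addedTopics, err)
	}
	return nil
}

// rollback removes every topic that was added before the failure and reports
// the original cause (joined with any rollback error).
func rollback(tp *topics, added []string, cause error) error {
	for _, name := range added {
		if err := tp.remove(name); err != nil {
			return errors.Join(cause, err)
		}
	}
	return cause
}

func main() {
	t, tp := &txn{}, &topics{}
	err := addCollections(t, tp, []string{"bafybeicol1", "bafybeicol2"})
	fmt.Println(err, t.committed) // prints: <nil> true
}
```

RemoveP2PCollections inverts the same shape (remove topics, re-add them on failure), and GetAllP2PCollections, being read-only, now just discards the transaction explicitly on each early return instead of relying on a deferred call.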
+ +package net + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + ipld "github.com/ipfs/go-ipld-format" + libp2p "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rpc "github.com/textileio/go-libp2p-pubsub-rpc" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/core/crdt" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/logging" + pb "github.com/sourcenetwork/defradb/net/pb" + netutils "github.com/sourcenetwork/defradb/net/utils" +) + +type EmptyNode struct{} + +var ErrEmptyNode error = errors.New("dummy node") + +func (n *EmptyNode) Resolve([]string) (any, []string, error) { + return nil, nil, ErrEmptyNode +} + +func (n *EmptyNode) Tree(string, int) []string { + return nil +} + +func (n *EmptyNode) ResolveLink([]string) (*ipld.Link, []string, error) { + return nil, nil, ErrEmptyNode +} + +func (n *EmptyNode) Copy() ipld.Node { + return &EmptyNode{} +} + +func (n *EmptyNode) Cid() cid.Cid { + id, err := cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: 0, // default length + }.Sum(nil) + + if err != nil { + panic("failed to create an empty cid!") + } + return id +} + +func (n *EmptyNode) Links() []*ipld.Link { + return nil +} + +func (n *EmptyNode) Loggable() map[string]any { + return nil +} + +func (n *EmptyNode) String() string { + return "[]" +} + +func (n *EmptyNode) RawData() []byte { + return nil +} + +func (n *EmptyNode) Size() (uint64, error) { + return 0, nil +} + +func (n *EmptyNode) Stat() (*ipld.NodeStat, error) { + return &ipld.NodeStat{}, nil +} + +func createCID(doc *client.Document) (cid.Cid, error) { + pref := cid.V1Builder{ + Codec: cid.DagProtobuf, + MhType: mh.SHA2_256, + MhLength: 0, // default length + } + + buf, err := doc.Bytes() + if err != nil { + return cid.Cid{}, err + } + + // And then feed it some data + c, err := pref.Sum(buf) + if err != nil { + return cid.Cid{}, err + } + return c, nil +} + +const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" + +func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + cfg := config.DefaultConfig() + cfg.Net.P2PAddress = randomMultiaddr + cfg.Net.RPCAddress = "0.0.0.0:0" + cfg.Net.TCPAddress = randomMultiaddr + + n, err := NewNode( + ctx, + db, + WithConfig(cfg), + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + return db, n +} + +func TestNewPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + h, err := libp2p.New() + require.NoError(t, err) + + _, err = NewPeer(ctx, db, h, nil, nil, nil, nil, nil) + require.NoError(t, err) +} + +func TestNewPeer_NoDB_NilDBError(t *testing.T) { + ctx := context.Background() + + h, err := libp2p.New() + require.NoError(t, err) + + _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil, nil) + require.ErrorIs(t, err, ErrNilDB) +} + +func 
TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + h, err := libp2p.New() + require.NoError(t, err) + + ps, err := pubsub.NewGossipSub( + ctx, + h, + pubsub.WithPeerExchange(true), + pubsub.WithFloodPublish(true), + ) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, ps, h.ID(), doc.Key().String(), true) + require.NoError(t, err) + + _, err = NewPeer(ctx, db, h, nil, ps, nil, nil, nil) + require.ErrorContains(t, err, "topic already exists") +} + +func TestStartAndClose_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + err := n.Start() + require.NoError(t, err) + + db.Close(ctx) +} + +func TestStart_WithKnownPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + store2 := memory.NewDatastore(ctx) + db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db1, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db2, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + + err = n2.Start() + require.NoError(t, err) + + db1.Close(ctx) + db2.Close(ctx) +} + +func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db1, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + store2 := memory.NewDatastore(ctx) + db2, err := db.NewDB(ctx, store2, db.WithUpdateEvents()) + require.NoError(t, err) + + n1, err := NewNode( + ctx, + db1, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + n2, err := NewNode( + ctx, + db2, + WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) + if err != nil { + t.Fatal(err) + } + n2.Boostrap(addrs) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + err = n1.Close() + require.NoError(t, err) + + // give time for n1 to close + time.Sleep(100 * time.Millisecond) + + err = n2.Start() + require.NoError(t, err) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 
1 log line but got %d lines", len(logLines)) + } + assert.Equal(t, "Failure while reconnecting to a known peer", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") + + db1.Close(ctx) + db2.Close(ctx) +} + +func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + err = n.Start() + require.ErrorIs(t, err, ErrNilUpdateChannel) + + db.Close(ctx) +} + +func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { + ctx := context.Background() + store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) + require.NoError(t, err) + + n, err := NewNode( + ctx, + db, + WithPubSub(true), + // WithDataPath() is a required option with the current implementation of key management + WithDataPath(t.TempDir()), + ) + require.NoError(t, err) + + db.Events().Updates.Value().Close() + + err = n.Start() + require.ErrorContains(t, err, "cannot subscribe to a closed channel") + + db.Close(ctx) +} + +func TestRegisterNewDocument_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + require.NoError(t, err) +} + +func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + require.Equal(t, err.Error(), "creating topic: joining topic: topic already exists") +} + +func TestSetReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.NoError(t, err) +} + +func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: []byte("/some/invalid/address"), + Collections: []string{"User"}, + }, 
+ ) + require.ErrorContains(t, err, "InvalidArgument") +} + +func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") +} + +func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) +} + +func TestPushToReplicator_SingleDocumentNoPeer_FailedToReplicateLogError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + keysCh, err := col.GetAllDocKeys(ctx) + require.NoError(t, err) + + txn, err := db.NewTxn(ctx, true) + require.NoError(t, err) + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + n.pushToReplicator(ctx, txn, col, keysCh, n.PeerID()) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 1 { + t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) + } + assert.Equal(t, "Failed to replicate log", logLines[0]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestDeleteReplicator_WithTargetSelf_SelfTargetForReplicatorError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorIs(t, err, ErrSelfTargetForReplicator) +} + +func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, n2 := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + 
&pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") +} + +func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + _, err = n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + }, + ) + require.NoError(t, err) +} + +func TestDeleteReplicator_WithNoCollection_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, n2 := newTestNode(ctx, t) + + _, err := n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + }, + ) + require.NoError(t, err) +} + +func TestDeleteReplicator_WithNotSetReplicator_KeyNotFoundError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + _, err = n.Peer.DeleteReplicator( + ctx, + &pb.DeleteReplicatorRequest{ + PeerID: []byte(n2.PeerID()), + Collections: []string{"User"}, + }, + ) + require.ErrorContains(t, err, "datastore: key not found") +} + +func TestGetAllReplicator_WithReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + reps, err := n.Peer.GetAllReplicators( + ctx, + &pb.GetAllReplicatorRequest{}, + ) + require.NoError(t, err) + + info, err := peer.AddrInfoFromP2pAddr(addr) + require.NoError(t, err) + + id, err := info.ID.MarshalBinary() + require.NoError(t, err) + + require.Equal(t, id, reps.Replicators[0].Info.Id) +} + +func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + _, err := n.Peer.GetAllReplicators( + ctx, + &pb.GetAllReplicatorRequest{}, + ) + require.ErrorContains(t, err, "datastore closed") +} + +func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + db.Close(ctx) + + err := n.Peer.loadReplicators(ctx) + require.ErrorContains(t, err, "datastore closed") +} + +func TestLoadReplicator_WithReplicator_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + 
require.NoError(t, err) + + err = n.Peer.loadReplicators(ctx) + require.NoError(t, err) +} + +func TestLoadReplicator_WithReplicatorAndEmptyReplicatorMap_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + n.replicators = make(map[string]map[peer.ID]struct{}) + + err = n.Peer.loadReplicators(ctx) + require.NoError(t, err) +} + +func TestAddP2PCollections_WithInvalidCollectionID_NotFoundError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{"invalid_collection"}, + }, + ) + require.Error(t, err, ds.ErrNotFound) +} + +func TestAddP2PCollections_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) +} + +func TestRemoveP2PCollectionsWithInvalidCollectionID(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + _, err := n.Peer.RemoveP2PCollections( + ctx, + &pb.RemoveP2PCollectionsRequest{ + Collections: []string{"invalid_collection"}, + }, + ) + require.Error(t, err, ds.ErrNotFound) +} + +func TestRemoveP2PCollections(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.RemoveP2PCollections( + ctx, + &pb.RemoveP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) +} + +func TestGetAllP2PCollectionsWithNoCollections(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + cols, err := n.Peer.GetAllP2PCollections( + ctx, + &pb.GetAllP2PCollectionsRequest{}, + ) + require.NoError(t, err) + require.Len(t, cols.Collections, 0) +} + +func TestGetAllP2PCollections(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + _, err = n.Peer.AddP2PCollections( + ctx, + &pb.AddP2PCollectionsRequest{ + Collections: []string{col.SchemaID()}, + }, + ) + require.NoError(t, err) + + cols, err := n.Peer.GetAllP2PCollections( + ctx, + &pb.GetAllP2PCollectionsRequest{}, + ) + require.NoError(t, err) + require.Equal(t, &pb.GetAllP2PCollectionsReply{ + Collections: []*pb.GetAllP2PCollectionsReply_Collection{{ + Id: col.SchemaID(), + Name: col.Name(), + }}, + }, cols) +} + +func TestHandleDocCreateLog_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := 
db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + err = n.handleDocCreateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + Priority: 0, + }) + require.NoError(t, err) +} + +func TestHandleDocCreateLog_WithInvalidDockey_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + err := n.handleDocCreateLog(events.Update{ + DocKey: "some-invalid-key", + }) + require.ErrorContains(t, err, "failed to get DocKey from broadcast message: selected encoding not supported") +} + +func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + err = n.handleDocCreateLog(events.Update{ + DocKey: doc.Key().String(), + SchemaID: col.SchemaID(), + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestHandleDocUpdateLog_NoError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + Priority: 0, + }) + require.NoError(t, err) +} + +func TestHandleDoUpdateLog_WithInvalidDockey_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + + err := n.handleDocUpdateLog(events.Update{ + DocKey: "some-invalid-key", + }) + require.ErrorContains(t, err, "failed to get DocKey from broadcast message: selected encoding not supported") +} + +func TestHandleDocUpdateLog_WithExistingDockeyTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := 
&crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaID(), true) + require.NoError(t, err) + + err = n.handleDocUpdateLog(events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) + require.ErrorContains(t, err, "topic already exists") +} + +func TestPushLogToReplicator_WithReplicator_FailedPushingLogError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + _, n2 := newTestNode(ctx, t) + + addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) + require.NoError(t, err) + + _, err = n.Peer.SetReplicator( + ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + docCid, err := createCID(doc) + require.NoError(t, err) + + delta := &crdt.CompositeDAGDelta{ + SchemaVersionID: col.Schema().VersionID, + Priority: 1, + DocKey: doc.Key().Bytes(), + } + + node, err := makeNode(delta, []cid.Cid{docCid}) + require.NoError(t, err) + + n.pushLogToReplicators(ctx, events.Update{ + DocKey: doc.Key().String(), + Cid: docCid, + SchemaID: col.SchemaID(), + Block: node, + }) +} + +func TestSession_NoError(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + ng := n.Session(ctx) + require.Implements(t, (*ipld.NodeGetter)(nil), ng) +} diff --git a/net/process.go b/net/process.go index fb40eed08f..35d735d6e5 100644 --- a/net/process.go +++ b/net/process.go @@ -17,6 +17,7 @@ import ( "fmt" "sync" + dag "github.com/ipfs/boxo/ipld/merkledag" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -38,7 +39,7 @@ func (p *Peer) processLog( ctx context.Context, txn datastore.Txn, col client.Collection, - dockey core.DataStoreKey, + dsKey core.DataStoreKey, c cid.Cid, field string, nd ipld.Node, @@ -47,7 +48,7 @@ func (p *Peer) processLog( ) ([]cid.Cid, error) { log.Debug(ctx, "Running processLog") - crdt, err := 
initCRDTForType(ctx, txn, col, dockey, field) + crdt, err := initCRDTForType(ctx, txn, col, dsKey, field) if err != nil { return nil, err } @@ -60,7 +61,7 @@ func (p *Peer) processLog( log.Debug( ctx, "Processing PushLog request", - logging.NewKV("DocKey", dockey), + logging.NewKV("Datastore key", dsKey), logging.NewKV("CID", c), ) @@ -86,7 +87,7 @@ func initCRDTForType( ctx context.Context, txn datastore.MultiStore, col client.Collection, - docKey core.DataStoreKey, + dsKey core.DataStoreKey, field string, ) (crdt.MerkleCRDT, error) { var key core.DataStoreKey @@ -97,18 +98,18 @@ func initCRDTForType( key = base.MakeCollectionKey( description, ).WithInstanceInfo( - docKey, + dsKey, ).WithFieldId( core.COMPOSITE_NAMESPACE, ) } else { - fd, ok := description.GetField(field) + fd, ok := description.Schema.GetField(field) if !ok { - return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, docKey)) + return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey)) } ctype = fd.Typ fieldID := fd.ID.String() - key = base.MakeCollectionKey(description).WithInstanceInfo(docKey).WithFieldId(fieldID) + key = base.MakeCollectionKey(description).WithInstanceInfo(dsKey).WithFieldId(fieldID) } log.Debug(ctx, "Got CRDT Type", logging.NewKV("CType", ctype), logging.NewKV("Field", field)) return crdt.DefaultFactory.InstanceWithStores( @@ -126,7 +127,7 @@ func decodeBlockBuffer(buf []byte, cid cid.Cid) (ipld.Node, error) { if err != nil { return nil, errors.Wrap("failed to create block", err) } - return ipld.Decode(blk) + return ipld.Decode(blk, dag.DecodeProtobufBlock) } func (p *Peer) createNodeGetter( @@ -143,7 +144,7 @@ func (p *Peer) handleChildBlocks( session *sync.WaitGroup, txn datastore.Txn, col client.Collection, - dockey core.DataStoreKey, + dsKey core.DataStoreKey, field string, nd ipld.Node, children []cid.Cid, @@ -187,14 +188,14 @@ func (p *Peer) handleChildBlocks( ctx, "Submitting new job to DAG queue", logging.NewKV("Collection", col.Name()), - logging.NewKV("DocKey", dockey), + logging.NewKV("Datastore key", dsKey), logging.NewKV("Field", fieldName), logging.NewKV("CID", cNode.Cid())) session.Add(1) job := &dagJob{ collection: col, - dockey: dockey, + dsKey: dsKey, fieldName: fieldName, session: session, nodeGetter: getter, diff --git a/net/server.go b/net/server.go index e04f6eb940..ad1fd2fb29 100644 --- a/net/server.go +++ b/net/server.go @@ -17,7 +17,7 @@ import ( "fmt" "sync" - "github.com/gogo/protobuf/proto" + "github.com/ipfs/go-cid" format "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpcpeer "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -56,6 +57,8 @@ type server struct { // This is used to prevent multiple concurrent processing of the same document and // limit unecessary transaction conflicts. 
docQueue *docQueue + + pb.UnimplementedServiceServer } // pubsubTopic is a wrapper of rpc.Topic to be able to track if the topic has @@ -198,12 +201,18 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } log.Debug(ctx, "Received a PushLog request", logging.NewKV("PeerID", pid)) - // parse request object - cid := req.Body.Cid.Cid + cid, err := cid.Cast(req.Body.Cid) + if err != nil { + return nil, err + } + dockey, err := client.NewDocKeyFromString(string(req.Body.DocKey)) + if err != nil { + return nil, err + } - s.docQueue.add(req.Body.DocKey.String()) + s.docQueue.add(dockey.String()) defer func() { - s.docQueue.done(req.Body.DocKey.String()) + s.docQueue.done(dockey.String()) if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { @@ -238,7 +247,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL } schemaID := string(req.Body.SchemaID) - docKey := core.DataStoreKeyFromDocKey(req.Body.DocKey.DocKey) + docKey := core.DataStoreKeyFromDocKey(dockey) var txnErr error for retry := 0; retry < s.peer.db.MaxTxnRetries(); retry++ { @@ -416,7 +425,7 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe return s.publishLog(ctx, topic, req) } - data, err := req.Marshal() + data, err := req.MarshalVT() if err != nil { return errors.Wrap("failed marshling pubsub message", err) } @@ -424,10 +433,16 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe if _, err := t.Publish(ctx, data, rpc.WithIgnoreResponse(true)); err != nil { return errors.Wrap(fmt.Sprintf("failed publishing to thread %s", topic), err) } + + cid, err := cid.Cast(req.Body.Cid) + if err != nil { + return err + } + log.Debug( ctx, "Published log", - logging.NewKV("CID", req.Body.Cid.Cid), + logging.NewKV("CID", cid), logging.NewKV("DocKey", topic), ) return nil diff --git a/net/server_test.go b/net/server_test.go new file mode 100644 index 0000000000..993c12d875 --- /dev/null +++ b/net/server_test.go @@ -0,0 +1,328 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
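For illustration, a minimal sketch of the request shape the updated PushLog handler expects: DocKey and SchemaID travel as raw bytes and the CID as its binary form, which the server parses back with cid.Cast and client.NewDocKeyFromString. It assumes an existing document doc, its head CID docCid, its collection col, a serialized head block named block, the local peer ID peerID, and a *server instance (n.server in the tests below); pb is the alias for github.com/sourcenetwork/defradb/net/pb:

    req := &pb.PushLogRequest{
        Body: &pb.PushLogRequest_Body{
            DocKey:   []byte(doc.Key().String()),
            Cid:      docCid.Bytes(),
            SchemaID: []byte(col.SchemaID()),
            Creator:  peerID.String(),
            Log:      &pb.Document_Log{Block: block.RawData()},
        },
    }
    // The server casts Body.Cid back into a cid.Cid and parses Body.DocKey
    // before queueing the document for processing.
    _, err := server.PushLog(ctx, req)
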
+ +package net + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + rpc "github.com/textileio/go-libp2p-pubsub-rpc" + grpcpeer "google.golang.org/grpc/peer" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" + net_pb "github.com/sourcenetwork/defradb/net/pb" +) + +func TestNewServerSimple(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + _, err := newServer(n.Peer, db) + require.NoError(t, err) +} + +func TestNewServerWithDBClosed(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + db.Close(ctx) + _, err := newServer(n.Peer, db) + require.ErrorIs(t, err, memory.ErrClosed) +} + +var mockError = errors.New("mock error") + +type mockDBColError struct { + client.DB +} + +func (mDB *mockDBColError) GetAllCollections(context.Context) ([]client.Collection, error) { + return nil, mockError +} + +func TestNewServerWithGetAllCollectionError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + mDB := mockDBColError{db} + _, err := newServer(n.Peer, &mDB) + require.ErrorIs(t, err, mockError) +} + +func TestNewServerWithCollectionSubscribed(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + err = n.AddP2PCollection(ctx, col.SchemaID()) + require.NoError(t, err) + + _, err = newServer(n.Peer, db) + require.NoError(t, err) +} + +type mockDBDockeysError struct { + client.DB +} + +func (mDB *mockDBDockeysError) GetAllCollections(context.Context) ([]client.Collection, error) { + return []client.Collection{ + &mockCollection{}, + }, nil +} + +type mockCollection struct { + client.Collection +} + +func (mCol *mockCollection) SchemaID() string { + return "mockColID" +} +func (mCol *mockCollection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { + return nil, mockError +} + +func TestNewServerWithGetAllDockeysError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + mDB := mockDBDockeysError{db} + + _, err = newServer(n.Peer, &mDB) + require.ErrorIs(t, err, mockError) +} + +func TestNewServerWithAddTopicError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + _, err = rpc.NewTopic(ctx, n.Peer.ps, n.Peer.host.ID(), doc.Key().String(), true) + require.NoError(t, err) + + _, err = newServer(n.Peer, db) + require.ErrorContains(t, err, "topic already exists") +} + +type mockHost struct { + host.Host +} + +func (mH *mockHost) EventBus() event.Bus { + return &mockBus{} +} + +type mockBus struct { + event.Bus +} + +func (mB *mockBus) Emitter(eventType any, opts 
...event.EmitterOpt) (event.Emitter, error) { + return nil, mockError +} + +func (mB *mockBus) Subscribe(eventType any, opts ...event.SubscriptionOpt) (event.Subscription, error) { + return nil, mockError +} + +func TestNewServerWithEmitterError(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + n.Peer.host = &mockHost{n.Peer.host} + + b := &bytes.Buffer{} + + log.ApplyConfig(logging.Config{ + Pipe: b, + }) + + _, err = newServer(n.Peer, db) + require.NoError(t, err) + + logLines, err := parseLines(b) + if err != nil { + t.Fatal(err) + } + + if len(logLines) != 2 { + t.Fatalf("expecting exactly 2 log line but got %d lines", len(logLines)) + } + assert.Equal(t, "could not create event emitter", logLines[0]["msg"]) + assert.Equal(t, "could not create event emitter", logLines[1]["msg"]) + + // reset logger + log = logging.MustNewLogger("defra.net") +} + +func parseLines(r io.Reader) ([]map[string]any, error) { + fileScanner := bufio.NewScanner(r) + + fileScanner.Split(bufio.ScanLines) + + logLines := []map[string]any{} + for fileScanner.Scan() { + loggedLine := make(map[string]any) + err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) + if err != nil { + return nil, err + } + logLines = append(logLines, loggedLine) + } + + return logLines, nil +} + +func TestGetDocGraph(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetDocGraph(ctx, &net_pb.GetDocGraphRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestPushDocGraph(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.PushDocGraph(ctx, &net_pb.PushDocGraphRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestGetLog(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetLog(ctx, &net_pb.GetLogRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestGetHeadLog(t *testing.T) { + ctx := context.Background() + _, n := newTestNode(ctx, t) + r, err := n.server.GetHeadLog(ctx, &net_pb.GetHeadLogRequest{}) + require.Nil(t, r) + require.Nil(t, err) +} + +func TestDocQueue(t *testing.T) { + q := docQueue{ + docs: make(map[string]chan struct{}), + } + + testKey := "test" + + q.add(testKey) + go q.add(testKey) + // give time for the goroutine to block + time.Sleep(10 * time.Millisecond) + require.Len(t, q.docs, 1) + q.done(testKey) + // give time for the goroutine to add the key + time.Sleep(10 * time.Millisecond) + q.mu.Lock() + require.Len(t, q.docs, 1) + q.mu.Unlock() + q.done(testKey) + q.mu.Lock() + require.Len(t, q.docs, 0) + q.mu.Unlock() +} + +func TestPushLog(t *testing.T) { + ctx := context.Background() + db, n := newTestNode(ctx, t) + + _, err := db.AddSchema(ctx, `type User { + name: String + age: Int + }`) + require.NoError(t, err) + + col, err := db.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + require.NoError(t, err) + + cid, err := createCID(doc) + require.NoError(t, err) + + ctx = grpcpeer.NewContext(ctx, &grpcpeer.Peer{ + Addr: addr{n.PeerID()}, + }) + + block := &EmptyNode{} + + _, err = 
n.server.PushLog(ctx, &net_pb.PushLogRequest{ + Body: &net_pb.PushLogRequest_Body{ + DocKey: []byte(doc.Key().String()), + Cid: cid.Bytes(), + SchemaID: []byte(col.SchemaID()), + Creator: n.PeerID().String(), + Log: &net_pb.Document_Log{ + Block: block.RawData(), + }, + }, + }) + require.NoError(t, err) +} diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index 0a8c48c8fd..0000000000 --- a/node/node_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package node - -import ( - "context" - "testing" - - badger "github.com/dgraph-io/badger/v3" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/client" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" - "github.com/sourcenetwork/defradb/db" - netutils "github.com/sourcenetwork/defradb/net/utils" -) - -// Node.Boostrap is not tested because the underlying, *ipfslite.Peer.Bootstrap is a best-effort function. - -func FixtureNewMemoryDBWithBroadcaster(t *testing.T) client.DB { - var database client.DB - var options []db.Option - ctx := context.Background() - options = append(options, db.WithUpdateEvents()) - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - assert.NoError(t, err) - database, err = db.NewDB(ctx, rootstore, options...) - assert.NoError(t, err) - return database -} - -func TestNewNode(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - _, err := NewNode( - context.Background(), - db, - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) -} - -func TestNewNodeNoPubSub(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - WithPubSub(false), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - assert.Nil(t, n.pubsub) -} - -func TestNewNodeWithPubSub(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n, err := NewNode( - ctx, - db, - WithPubSub(true), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - // overly simple check of validity of pubsub, avoiding the process of creating a PubSub - assert.NotNil(t, n.pubsub) -} - -func TestNewNodeWithPubSubFailsWithoutDataPath(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - _, err := NewNode( - ctx, - db, - WithPubSub(true), - ) - assert.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") -} - -func TestNodeClose(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - err = n.Close() - assert.NoError(t, err) -} - -func TestNewNodeBootstrapWithNoPeer(t *testing.T) { - db := 
FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n1.Boostrap([]peer.AddrInfo{}) -} - -func TestNewNodeBootstrapWithOnePeer(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n2, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) - if err != nil { - t.Fatal(err) - } - n2.Boostrap(addrs) -} - -func TestNewNodeBootstrapWithOneValidPeerAndManyInvalidPeers(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - ctx := context.Background() - n1, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - n2, err := NewNode( - ctx, - db, - ListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // DataPath() is a required option with the current implementation of key management - DataPath(t.TempDir()), - ) - assert.NoError(t, err) - addrs, err := netutils.ParsePeers([]string{ - n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String(), - "/ip4/0.0.0.0/tcp/1234/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci6", - "/ip4/0.0.0.0/tcp/1235/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci5", - "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", - }) - if err != nil { - t.Fatal(err) - } - n2.Boostrap(addrs) -} - -func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { - var options Options - var nodeOpt NodeOpt - for _, opt := range append(nodeOpts, nodeOpt) { - if opt == nil { - continue - } - if err := opt(&options); err != nil { - return options, err - } - } - return options, nil -} - -func TestInvalidListenTCPAddrString(t *testing.T) { - opt := ListenTCPAddrString("/ip4/碎片整理") - options, err := mergeOptions(opt) - assert.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") - assert.Equal(t, Options{}, options) -} diff --git a/planner/average.go b/planner/average.go index aacadab2ca..9de120ed98 100644 --- a/planner/average.go +++ b/planner/average.go @@ -56,7 +56,7 @@ func (p *Planner) Average( sumFieldIndex: sumField.Index, countFieldIndex: countField.Index, virtualFieldIndex: field.Index, - docMapper: docMapper{&field.DocumentMapping}, + docMapper: docMapper{field.DocumentMapping}, }, nil } diff --git a/planner/commit.go b/planner/commit.go index fca9c62865..e6216e2b43 100644 --- a/planner/commit.go +++ b/planner/commit.go @@ -53,7 +53,7 @@ func (p *Planner) DAGScan(commitSelect *mapper.CommitSelect) *dagScanNode { visitedNodes: make(map[string]bool), queuedCids: []*cid.Cid{}, commitSelect: commitSelect, - docMapper: docMapper{&commitSelect.DocumentMapping}, + docMapper: docMapper{commitSelect.DocumentMapping}, } } @@ -333,7 +333,7 @@ 
func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L return core.Doc{}, nil, err } - field, ok := c.Description().GetField(fieldName.(string)) + field, ok := c.Description().Schema.GetField(fieldName.(string)) if !ok { return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string)) } diff --git a/planner/count.go b/planner/count.go index 28222f11c7..a3eddf0fbc 100644 --- a/planner/count.go +++ b/planner/count.go @@ -48,7 +48,7 @@ func (p *Planner) Count(field *mapper.Aggregate, host *mapper.Select) (*countNod p: p, virtualFieldIndex: field.Index, aggregateMapping: field.AggregateTargets, - docMapper: docMapper{&field.DocumentMapping}, + docMapper: docMapper{field.DocumentMapping}, }, nil } @@ -75,10 +75,22 @@ func (n *countNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if source.Filter == nil || source.Filter.ExternalConditions == nil { + if source.Filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = source.Filter.ExternalConditions + // get the target aggregate document mapping. Since the filters + // are relative to the target aggregate collection (and doc mapper). + // + // We can determine if there is a child map if the index from the + // aggregate target is set (non nil) on the childMapping + var targetMap *core.DocumentMapping + if source.Index < len(n.documentMapping.ChildMappings) && + n.documentMapping.ChildMappings[source.Index] != nil { + targetMap = n.documentMapping.ChildMappings[source.Index] + } else { + targetMap = n.documentMapping + } + simpleExplainMap[filterLabel] = source.Filter.ToMap(targetMap) } // Add the main field name. diff --git a/planner/create.go b/planner/create.go index 291c723300..618591ccfe 100644 --- a/planner/create.go +++ b/planner/create.go @@ -88,9 +88,13 @@ func (n *createNode) Next() (bool, error) { currentValue.SetKey(n.doc.Key().String()) for i, value := range n.doc.Values() { - // On create the document will have no aliased fields/aggregates/etc so we can safely take - // the first index. 
- n.documentMapping.SetFirstOfName(¤tValue, i.Name(), value.Value()) + if len(n.documentMapping.IndexesByName[i.Name()]) > 0 { + n.documentMapping.SetFirstOfName(¤tValue, i.Name(), value.Value()) + } else if aliasName := i.Name() + request.RelatedObjectID; len(n.documentMapping.IndexesByName[aliasName]) > 0 { + n.documentMapping.SetFirstOfName(¤tValue, aliasName, value.Value()) + } else { + return false, client.NewErrFieldNotExist(i.Name()) + } } n.returned = true @@ -171,7 +175,7 @@ func (p *Planner) CreateDoc(parsed *mapper.Mutation) (planNode, error) { p: p, newDocStr: parsed.Data, results: results, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } // get collection diff --git a/planner/datasource.go b/planner/datasource.go index 2dea8290c5..afcfbab3ce 100644 --- a/planner/datasource.go +++ b/planner/datasource.go @@ -35,15 +35,13 @@ func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) { return p.getCollectionScanPlan(parsed) } -// @todo: Add field selection func (p *Planner) getCollectionScanPlan(parsed *mapper.Select) (planSource, error) { colDesc, err := p.getCollectionDesc(parsed.CollectionName) if err != nil { return planSource{}, err } - scan := p.Scan(parsed) - err = scan.initCollection(colDesc) + scan, err := p.Scan(parsed) if err != nil { return planSource{}, err } diff --git a/planner/delete.go b/planner/delete.go index ef79463302..de59cf30b7 100644 --- a/planner/delete.go +++ b/planner/delete.go @@ -91,10 +91,10 @@ func (n *deleteNode) simpleExplain() (map[string]any, error) { simpleExplainMap[idsLabel] = n.ids // Add the filter attribute if it exists, otherwise have it nil. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } return simpleExplainMap, nil @@ -134,6 +134,6 @@ func (p *Planner) DeleteDocs(parsed *mapper.Mutation) (planNode, error) { ids: parsed.DocKeys.Value(), collection: col.WithTxn(p.txn), source: slctNode, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } diff --git a/planner/errors.go b/planner/errors.go index fbe3f89f78..c4856178f3 100644 --- a/planner/errors.go +++ b/planner/errors.go @@ -16,6 +16,7 @@ const ( errUnknownDependency string = "given field does not exist" errFailedToClosePlan string = "failed to close the plan" errFailedToCollectExecExplainInfo string = "failed to collect execution explain information" + errSubTypeInit string = "sub-type initialization error at scan node reset" ) var ( @@ -33,6 +34,7 @@ var ( ErrMissingChildValue = errors.New("expected child value, however none was yielded") ErrUnknownRelationType = errors.New("failed sub selection, unknown relation type") ErrUnknownExplainRequestType = errors.New("can not explain request of unknown type") + ErrSubTypeInit = errors.New(errSubTypeInit) ErrFailedToCollectExecExplainInfo = errors.New(errFailedToCollectExecExplainInfo) ErrUnknownDependency = errors.New(errUnknownDependency) ) @@ -48,3 +50,7 @@ func NewErrFailedToClosePlan(inner error, location string) error { func NewErrFailedToCollectExecExplainInfo(inner error) error { return errors.Wrap(errFailedToCollectExecExplainInfo, inner) } + +func NewErrSubTypeInit(inner error) error { + return errors.Wrap(errSubTypeInit, inner) +} diff --git a/planner/explain.go b/planner/explain.go index 
f4494fcf72..560063b4ba 100644 --- a/planner/explain.go +++ b/planner/explain.go @@ -57,12 +57,113 @@ const ( fieldNameLabel = "fieldName" filterLabel = "filter" idsLabel = "ids" + joinRootLabel = "root" + joinSubTypeLabel = "subType" + keysLabel = "_keys" limitLabel = "limit" offsetLabel = "offset" sourcesLabel = "sources" spansLabel = "spans" ) +// buildDebugExplainGraph dumps the entire plan graph as is, with all the plan nodes. +// +// Note: This also includes plan nodes that aren't "explainable". +func buildDebugExplainGraph(source planNode) (map[string]any, error) { + explainGraph := map[string]any{} + + if source == nil { + return explainGraph, nil + } + + switch node := source.(type) { + // Walk the multiple children if it is a MultiNode. + case MultiNode: + multiChildExplainGraph := []map[string]any{} + for _, childSource := range node.Children() { + childExplainGraph, err := buildDebugExplainGraph(childSource) + if err != nil { + return nil, err + } + multiChildExplainGraph = append(multiChildExplainGraph, childExplainGraph) + } + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = multiChildExplainGraph + + case *typeJoinMany: + var explainGraphBuilder = map[string]any{} + + // If root is not the last child then keep walking and explaining the root graph. + if node.root != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if err != nil { + return nil, err + } + // Add the explaination of the rest of the explain graph under the "root" graph. + explainGraphBuilder[joinRootLabel] = indexJoinRootExplainGraph + } + + if node.subType != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if err != nil { + return nil, err + } + // Add the explaination of the rest of the explain graph under the "subType" graph. + explainGraphBuilder[joinSubTypeLabel] = indexJoinSubTypeExplainGraph + } + + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + + case *typeJoinOne: + var explainGraphBuilder = map[string]any{} + + // If root is not the last child then keep walking and explaining the root graph. + if node.root != nil { + indexJoinRootExplainGraph, err := buildDebugExplainGraph(node.root) + if err != nil { + return nil, err + } + // Add the explaination of the rest of the explain graph under the "root" graph. + explainGraphBuilder[joinRootLabel] = indexJoinRootExplainGraph + } else { + explainGraphBuilder[joinRootLabel] = nil + } + + if node.subType != nil { + indexJoinSubTypeExplainGraph, err := buildDebugExplainGraph(node.subType) + if err != nil { + return nil, err + } + // Add the explaination of the rest of the explain graph under the "subType" graph. + explainGraphBuilder[joinSubTypeLabel] = indexJoinSubTypeExplainGraph + } else { + explainGraphBuilder[joinSubTypeLabel] = nil + } + + nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + + default: + var explainGraphBuilder = map[string]any{} + + // If not the last child then keep walking the graph to find more plan nodes. + // Also make sure the next source / child isn't a recursive `topLevelNode`. + if next := node.Source(); next != nil && next.Kind() != topLevelNodeKind { + var err error + explainGraphBuilder, err = buildDebugExplainGraph(next) + if err != nil { + return nil, err + } + } + // Add the graph of the next node under current node. 
+ nodeLabelTitle := strcase.ToLowerCamel(node.Kind()) + explainGraph[nodeLabelTitle] = explainGraphBuilder + } + + return explainGraph, nil +} + // buildSimpleExplainGraph builds the explainGraph from the given top level plan. // // Request: @@ -134,7 +235,7 @@ func buildSimpleExplainGraph(source planNode) (map[string]any, error) { return nil, err } // Add the explaination of the rest of the explain graph under the "root" graph. - indexJoinGraph["root"] = indexJoinRootExplainGraph + indexJoinGraph[joinRootLabel] = indexJoinRootExplainGraph } // Add this restructured typeIndexJoin explain graph. explainGraph[strcase.ToLowerCamel(node.Kind())] = indexJoinGraph @@ -345,6 +446,22 @@ func (p *Planner) explainRequest( return explainResult, nil + case request.DebugExplain: + // walks through the plan graph, and outputs the concrete planNodes that should + // be executed, maintaining their order in the plan graph (does not actually execute them). + explainGraph, err := buildDebugExplainGraph(plan) + if err != nil { + return nil, err + } + + explainResult := []map[string]any{ + { + request.ExplainLabel: explainGraph, + }, + } + + return explainResult, nil + case request.ExecuteExplain: return p.executeAndExplainRequest(ctx, plan) diff --git a/planner/group.go b/planner/group.go index e87d753d14..0890b13d84 100644 --- a/planner/group.go +++ b/planner/group.go @@ -91,7 +91,7 @@ func (p *Planner) GroupBy(n *mapper.GroupBy, parsed *mapper.Select, childSelects childSelects: childSelects, groupByFields: n.Fields, dataSources: dataSources, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } return &groupNodeObj, nil } @@ -236,10 +236,10 @@ func (n *groupNode) simpleExplain() (map[string]any, error) { childExplainGraph["docKeys"] = nil } - if c.Filter == nil || c.Filter.ExternalConditions == nil { + if c.Filter == nil { childExplainGraph[filterLabel] = nil } else { - childExplainGraph[filterLabel] = c.Filter.ExternalConditions + childExplainGraph[filterLabel] = c.Filter.ToMap(n.documentMapping) } if c.Limit != nil { diff --git a/planner/limit.go b/planner/limit.go index d3c2954d9b..979bc50c02 100644 --- a/planner/limit.go +++ b/planner/limit.go @@ -46,7 +46,7 @@ func (p *Planner) Limit(parsed *mapper.Select, n *mapper.Limit) (*limitNode, err limit: n.Limit, offset: n.Offset, rowIndex: 0, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } diff --git a/planner/mapper/aggregate.go b/planner/mapper/aggregate.go index 4d7671914e..ceed03448e 100644 --- a/planner/mapper/aggregate.go +++ b/planner/mapper/aggregate.go @@ -45,7 +45,7 @@ type AggregateTarget struct { type Aggregate struct { Field // The mapping of this aggregate's parent/host. - core.DocumentMapping + *core.DocumentMapping // The collection of targets that this aggregate will aggregate. 
AggregateTargets []AggregateTarget diff --git a/planner/mapper/errors.go b/planner/mapper/errors.go index 83a5c11b3a..552021ca94 100644 --- a/planner/mapper/errors.go +++ b/planner/mapper/errors.go @@ -12,8 +12,18 @@ package mapper import "github.com/sourcenetwork/defradb/errors" +const ( + errInvalidFieldToGroupBy string = "invalid field value to groupBy" +) + var ( ErrUnableToIdAggregateChild = errors.New("unable to identify aggregate child") ErrAggregateTargetMissing = errors.New("aggregate must be provided with a property to aggregate") ErrFailedToFindHostField = errors.New("failed to find host field") + ErrInvalidFieldIndex = errors.New("given field doesn't have any indexes") + ErrMissingSelect = errors.New("missing target select field") ) + +func NewErrInvalidFieldToGroupBy(field string) error { + return errors.New(errInvalidFieldToGroupBy, errors.NewKV("Field", field)) +} diff --git a/planner/mapper/mapper.go b/planner/mapper/mapper.go index a2a8250f4a..5b823f6ec2 100644 --- a/planner/mapper/mapper.go +++ b/planner/mapper/mapper.go @@ -25,6 +25,10 @@ import ( "github.com/sourcenetwork/defradb/datastore" ) +var ( + FilterEqOp = &Operator{Operation: "_eq"} +) + // ToSelect converts the given [parser.Select] into a [Select]. // // In the process of doing so it will construct the document map required to access the data @@ -69,8 +73,9 @@ func toSelect( fields = append(fields, filterDependencies...) // Resolve order dependencies that may have been missed due to not being rendered. - if err := resolveOrderDependencies( - descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields); err != nil { + err = resolveOrderDependencies( + descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields) + if err != nil { return nil, err } @@ -83,12 +88,31 @@ func toSelect( desc, descriptionsRepo, ) + if err != nil { return nil, err } - // If there is a groupBy, and no inner group has been requested, we need to map the property here + // Resolve groupBy mappings i.e. alias remapping and handle missed inner group. if selectRequest.GroupBy.HasValue() { + groupByFields := selectRequest.GroupBy.Value().Fields + // Remap all alias field names to use their internal field name mappings. + for index, groupByField := range groupByFields { + fieldDesc, ok := desc.Schema.GetField(groupByField) + if ok && fieldDesc.IsObject() && !fieldDesc.IsObjectArray() { + groupByFields[index] = groupByField + request.RelatedObjectID + } else if ok && fieldDesc.IsObjectArray() { + return nil, NewErrInvalidFieldToGroupBy(groupByField) + } + } + + selectRequest.GroupBy = immutable.Some( + request.GroupBy{ + Fields: groupByFields, + }, + ) + + // If there is a groupBy, and no inner group has been requested, we need to map the property here if _, isGroupFieldMapped := mapping.IndexesByName[request.GroupFieldName]; !isGroupFieldMapped { index := mapping.GetNextIndex() mapping.Add(index, request.GroupFieldName) @@ -97,7 +121,7 @@ func toSelect( return &Select{ Targetable: toTargetable(thisIndex, selectRequest, mapping), - DocumentMapping: *mapping, + DocumentMapping: mapping, Cid: selectRequest.CID, CollectionName: collectionName, Fields: fields, @@ -117,38 +141,103 @@ func resolveOrderDependencies( return nil } + currentExistingFields := existingFields // If there is orderby, and any one of the condition fields that are join fields and have not been // requested, we need to map them here. 
+outer: for _, condition := range source.Value().Conditions { - if len(condition.Fields) <= 1 { - continue - } - - joinField := condition.Fields[0] + fields := condition.Fields[:] // copy slice + for { + numFields := len(fields) + // <2 fields: Direct field on the root type: {age: DESC} + // 2 fields: Single depth related type: {author: {age: DESC}} + // >2 fields: Multi depth related type: {author: {friends: {age: DESC}}} + if numFields == 2 { + joinField := fields[0] + + // ensure the child select is resolved for this order join + innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, currentExistingFields) + if err != nil { + return err + } - // Check if the join field is already mapped, if not then map it. - if isOrderJoinFieldMapped := len(mapping.IndexesByName[joinField]) != 0; !isOrderJoinFieldMapped { - index := mapping.GetNextIndex() - mapping.Add(index, joinField) + // make sure the actual target field inside the join field + // is included in the select + targetFieldName := fields[1] + targetField := &Field{ + Index: innerSelect.FirstIndexOfName(targetFieldName), + Name: targetFieldName, + } + innerSelect.Fields = append(innerSelect.Fields, targetField) + continue outer + } else if numFields > 2 { + joinField := fields[0] - // Resolve the inner child fields and get it's mapping. - dummyJoinFieldSelect := request.Select{ - Field: request.Field{ - Name: joinField, - }, - } - innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName) - if err != nil { - return err + // ensure the child select is resolved for this order join + innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, existingFields) + if err != nil { + return err + } + mapping = innerSelect.DocumentMapping + currentExistingFields = &innerSelect.Fields + fields = fields[1:] // chop off the front item, and loop again on inner + } else { // <= 1 + targetFieldName := fields[0] + *existingFields = append(*existingFields, &Field{ + Index: mapping.FirstIndexOfName(targetFieldName), + Name: targetFieldName, + }) + // nothing to do, continue the outer for loop + continue outer } - *existingFields = append(*existingFields, innerSelect) - mapping.SetChildAt(index, &innerSelect.DocumentMapping) } } return nil } +// given a type join field, ensure its mapping exists +// and add a corresponding select field(s) +func resolveChildOrder( + descriptionsRepo *DescriptionsRepo, + descName string, + orderChildField string, + mapping *core.DocumentMapping, + existingFields *[]Requestable, +) (*Select, error) { + childFieldIndexes := mapping.IndexesByName[orderChildField] + // Check if the join field is already mapped, if not then map it. + if len(childFieldIndexes) == 0 { + index := mapping.GetNextIndex() + mapping.Add(index, orderChildField) + + // Resolve the inner child fields and get its mapping.
+ dummyJoinFieldSelect := request.Select{ + Field: request.Field{ + Name: orderChildField, + }, + } + innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName) + if err != nil { + return nil, err + } + *existingFields = append(*existingFields, innerSelect) + mapping.SetChildAt(index, innerSelect.DocumentMapping) + return innerSelect, nil + } else { + for _, field := range *existingFields { + fieldSelect, ok := field.(*Select) + if !ok { + continue + } + if fieldSelect.Field.Name == orderChildField { + return fieldSelect, nil + } + } + } + return nil, ErrMissingSelect +} + // resolveAggregates figures out which fields the given aggregates are targeting // and converts the aggregateRequest into an Aggregate, appending it onto the given // fields slice. @@ -166,7 +255,6 @@ func resolveAggregates( ) ([]Requestable, error) { fields := inputFields dependenciesByParentId := map[int][]int{} - for _, aggregate := range aggregates { aggregateTargets := make([]AggregateTarget, len(aggregate.targets)) @@ -182,7 +270,7 @@ func resolveAggregates( var hasHost bool var convertedFilter *Filter if childIsMapped { - fieldDesc, isField := desc.GetField(target.hostExternalName) + fieldDesc, isField := desc.Schema.GetField(target.hostExternalName) if isField && !fieldDesc.IsObject() { var order *OrderBy if target.order.HasValue() && len(target.order.Value().Conditions) > 0 { @@ -205,14 +293,14 @@ func resolveAggregates( Index: int(fieldDesc.ID), Name: target.hostExternalName, }, - Filter: ToFilter(target.filter, mapping), + Filter: ToFilter(target.filter.Value(), mapping), Limit: target.limit, OrderBy: order, } } else { childObjectIndex := mapping.FirstIndexOfName(target.hostExternalName) childMapping := mapping.ChildMappings[childObjectIndex] - convertedFilter = ToFilter(target.filter, childMapping) + convertedFilter = ToFilter(target.filter.Value(), childMapping) host, hasHost = tryGetTarget( target.hostExternalName, convertedFilter, @@ -238,7 +326,6 @@ func resolveAggregates( if err != nil { return nil, err } - mapAggregateNestedTargets(target, hostSelectRequest, selectRequest.Root) childMapping, childDesc, err := getTopLevelInfo(descriptionsRepo, hostSelectRequest, childCollectionName) @@ -251,13 +338,19 @@ func resolveAggregates( return nil, err } + err = resolveOrderDependencies( + descriptionsRepo, childCollectionName, target.order, childMapping, &childFields) + if err != nil { + return nil, err + } + childMapping = childMapping.CloneWithoutRender() mapping.SetChildAt(index, childMapping) if !childIsMapped { // If the child was not mapped, the filter will not have been converted yet // so we must do that now. 
- convertedFilter = ToFilter(target.filter, mapping.ChildMappings[index]) + convertedFilter = ToFilter(target.filter.Value(), mapping.ChildMappings[index]) } dummyJoin := &Select{ @@ -271,7 +364,7 @@ func resolveAggregates( OrderBy: toOrderBy(target.order, childMapping), }, CollectionName: childCollectionName, - DocumentMapping: *childMapping, + DocumentMapping: childMapping, Fields: childFields, } @@ -308,6 +401,12 @@ func resolveAggregates( return nil, ErrUnableToIdAggregateChild } + // ensure target aggregate field is included in the type join + hostSelect.Fields = append(hostSelect.Fields, &Field{ + Index: hostSelect.DocumentMapping.FirstIndexOfName(target.childExternalName), + Name: target.childExternalName, + }) + childTarget = OptionalChildTarget{ // If there are multiple children of the same name there is no way // for us (or the consumer) to identify which one they are hoping for @@ -326,7 +425,7 @@ func resolveAggregates( newAggregate := Aggregate{ Field: aggregate.field, - DocumentMapping: *mapping, + DocumentMapping: mapping, AggregateTargets: aggregateTargets, } fields = append(fields, &newAggregate) @@ -514,7 +613,7 @@ func getRequestables( return nil, nil, err } fields = append(fields, innerSelect) - mapping.SetChildAt(index, &innerSelect.DocumentMapping) + mapping.SetChildAt(index, innerSelect.DocumentMapping) mapping.RenderKeys = append(mapping.RenderKeys, core.RenderKey{ Index: index, @@ -594,7 +693,7 @@ func getCollectionName( return "", err } - hostFieldDesc, parentHasField := parentDescription.GetField(selectRequest.Name) + hostFieldDesc, parentHasField := parentDescription.Schema.GetField(selectRequest.Name) if parentHasField && hostFieldDesc.RelationType != 0 { // If this field exists on the parent, and it is a child object // then this collection name is the collection name of the child. 
@@ -696,6 +795,7 @@ func resolveInnerFilterDependencies( ) ([]Requestable, error) { newFields := []Requestable{} +sourceLoop: for key := range source { if strings.HasPrefix(key, "_") && key != request.KeyFieldName { continue @@ -732,7 +832,7 @@ func resolveInnerFilterDependencies( }, }, CollectionName: childCollectionName, - DocumentMapping: *childMapping, + DocumentMapping: childMapping, } newFields = append(newFields, dummyJoin) @@ -743,7 +843,18 @@ func resolveInnerFilterDependencies( if keyIndex >= len(mapping.ChildMappings) { // If the key index is outside the bounds of the child mapping array, then - // this is not a relation/join and we can continue (no child props to process) + // this is not a relation/join and we can add it to the fields and + // continue (no child props to process) + for _, field := range existingFields { + if field.GetIndex() == keyIndex { + continue sourceLoop + } + } + newFields = append(existingFields, &Field{ + Index: keyIndex, + Name: key, + }) + continue } @@ -778,7 +889,7 @@ func resolveInnerFilterDependencies( enumerable.New(existingFields), ) - matchingFields := enumerable.Where(allFields, func(existingField Requestable) (bool, error) { + matchingFields := enumerable.Where[Requestable](allFields, func(existingField Requestable) (bool, error) { return existingField.GetIndex() == keyIndex, nil }) @@ -860,7 +971,7 @@ func toTargetable(index int, selectRequest *request.Select, docMap *core.Documen return Targetable{ Field: toField(index, selectRequest), DocKeys: selectRequest.DocKeys, - Filter: ToFilter(selectRequest.Filter, docMap), + Filter: ToFilter(selectRequest.Filter.Value(), docMap), Limit: toLimit(selectRequest.Limit, selectRequest.Offset), GroupBy: toGroupBy(selectRequest.GroupBy, docMap), OrderBy: toOrderBy(selectRequest.OrderBy, docMap), @@ -878,20 +989,20 @@ func toField(index int, selectRequest *request.Select) Field { // ToFilter converts the given `source` request filter to a Filter using the given mapping. // // Any requestables identified by name will be converted to being identified by index instead. 
-func ToFilter(source immutable.Option[request.Filter], mapping *core.DocumentMapping) *Filter { - if !source.HasValue() { +func ToFilter(source request.Filter, mapping *core.DocumentMapping) *Filter { + if len(source.Conditions) == 0 { return nil } - conditions := make(map[connor.FilterKey]any, len(source.Value().Conditions)) + conditions := make(map[connor.FilterKey]any, len(source.Conditions)) - for sourceKey, sourceClause := range source.Value().Conditions { + for sourceKey, sourceClause := range source.Conditions { key, clause := toFilterMap(sourceKey, sourceClause, mapping) conditions[key] = clause } return &Filter{ Conditions: conditions, - ExternalConditions: source.Value().Conditions, + ExternalConditions: source.Conditions, } } @@ -928,6 +1039,13 @@ func toFilterMap( returnClauses = append(returnClauses, returnClause) } return key, returnClauses + case map[string]any: + innerMapClause := map[connor.FilterKey]any{} + for innerSourceKey, innerSourceValue := range typedClause { + rKey, rValue := toFilterMap(innerSourceKey, innerSourceValue, mapping) + innerMapClause[rKey] = rValue + } + return key, innerMapClause default: return key, typedClause } diff --git a/planner/mapper/select.go b/planner/mapper/select.go index 2696c0ca82..1c4b509caa 100644 --- a/planner/mapper/select.go +++ b/planner/mapper/select.go @@ -25,7 +25,7 @@ type Select struct { // The document mapping for this select, describing how items yielded // for this select can be accessed and rendered. - core.DocumentMapping + *core.DocumentMapping // A commit identifier that can be specified to request data at a given time. Cid immutable.Option[string] diff --git a/planner/mapper/targetable.go b/planner/mapper/targetable.go index 1d2861f23f..49190b911f 100644 --- a/planner/mapper/targetable.go +++ b/planner/mapper/targetable.go @@ -86,6 +86,55 @@ func NewFilter() *Filter { } } +func (f *Filter) ToMap(mapping *core.DocumentMapping) map[string]any { + return filterObjectToMap(mapping, f.Conditions) +} + +func filterObjectToMap(mapping *core.DocumentMapping, obj map[connor.FilterKey]any) map[string]any { + outmap := make(map[string]any) + if obj == nil { + return nil + } + for k, v := range obj { + switch keyType := k.(type) { + case *PropertyIndex: + subObj := v.(map[connor.FilterKey]any) + outkey, _ := mapping.TryToFindNameFromIndex(keyType.Index) + childMapping, ok := tryGetChildMapping(mapping, keyType.Index) + if ok { + outmap[outkey] = filterObjectToMap(childMapping, subObj) + } else { + outmap[outkey] = filterObjectToMap(mapping, subObj) + } + + case *Operator: + switch keyType.Operation { + case "_and", "_or": + v := v.([]any) + logicMapEntries := make([]any, len(v)) + for i, item := range v { + itemMap := item.(map[connor.FilterKey]any) + logicMapEntries[i] = filterObjectToMap(mapping, itemMap) + } + outmap[keyType.Operation] = logicMapEntries + case "_not": + itemMap := v.(map[connor.FilterKey]any) + outmap[keyType.Operation] = filterObjectToMap(mapping, itemMap) + default: + outmap[keyType.Operation] = v + } + } + } + return outmap +} + +func tryGetChildMapping(mapping *core.DocumentMapping, index int) (*core.DocumentMapping, bool) { + if index <= len(mapping.ChildMappings)-1 { + return mapping.ChildMappings[index], true + } + return nil, false +} + // Limit represents a limit-offset pairing that controls how many // and which records will be returned from a request. 
type Limit struct { diff --git a/planner/order.go b/planner/order.go index 7bbe0c91b0..5f61a952c9 100644 --- a/planner/order.go +++ b/planner/order.go @@ -82,7 +82,7 @@ func (p *Planner) OrderBy(parsed *mapper.Select, n *mapper.OrderBy) (*orderNode, p: p, ordering: n.Conditions, needSort: true, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, }, nil } @@ -110,14 +110,20 @@ func (n *orderNode) simpleExplain() (map[string]any, error) { for _, element := range n.ordering { // Build the list containing the corresponding names of all the indexes. fieldNames := []string{} + + mapping := n.documentMapping for _, fieldIndex := range element.FieldIndexes { - // Try to find the name of this index. - fieldName, found := n.documentMapping.TryToFindNameFromIndex(fieldIndex) + fieldName, found := mapping.TryToFindNameFromIndex(fieldIndex) if !found { return nil, client.NewErrFieldIndexNotExist(fieldIndex) } fieldNames = append(fieldNames, fieldName) + if fieldIndex < len(mapping.ChildMappings) { + if childMapping := mapping.ChildMappings[fieldIndex]; childMapping != nil { + mapping = childMapping + } + } } // Put it all together for this order element. diff --git a/planner/planner.go b/planner/planner.go index fb6d325123..3af7b745e7 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -17,14 +17,9 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/planner/mapper" ) -var ( - log = logging.MustNewLogger("planner") -) - // planNode is an interface all nodes in the plan tree need to implement. type planNode interface { // Initializes or Re-Initializes an existing planNode, often called internally by Start(). @@ -525,7 +520,11 @@ func (p *Planner) RunSubscriptionRequest( return nil, err } - return p.executeRequest(ctx, planNode) + data, err := p.executeRequest(ctx, planNode) + if err != nil { + return nil, err + } + return data, nil } // MakePlan makes a plan from the parsed request. diff --git a/planner/scan.go b/planner/scan.go index d8fb2c34c0..43bf47e27a 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -16,18 +16,18 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" + "github.com/sourcenetwork/defradb/lens" "github.com/sourcenetwork/defradb/planner/mapper" + "github.com/sourcenetwork/defradb/request/graphql/parser" ) +// scanExecInfo contains information about the execution of a scan. type scanExecInfo struct { // Total number of times scan was issued. iterations uint64 - // Total number of times attempted to fetch documents. - docFetches uint64 - - // Total number of documents that matched / passed the filter. - filterMatches uint64 + // Information about fetches. 
+ fetches fetcher.ExecInfo } // scans an index for records @@ -38,7 +38,7 @@ type scanNode struct { p *Planner desc client.CollectionDescription - fields []*client.FieldDescription + fields []client.FieldDescription docKey []byte showDeleted bool @@ -47,8 +47,7 @@ type scanNode struct { reverse bool filter *mapper.Filter - - scanInitialized bool + slct *mapper.Select fetcher fetcher.Fetcher @@ -61,7 +60,16 @@ func (n *scanNode) Kind() string { func (n *scanNode) Init() error { // init the fetcher - if err := n.fetcher.Init(&n.desc, n.fields, n.reverse, n.showDeleted); err != nil { + if err := n.fetcher.Init( + n.p.ctx, + n.p.txn, + &n.desc, + n.fields, + n.filter, + n.slct.DocumentMapping, + n.reverse, + n.showDeleted, + ); err != nil { return err } return n.initScan() @@ -69,9 +77,63 @@ func (n *scanNode) Init() error { func (n *scanNode) initCollection(desc client.CollectionDescription) error { n.desc = desc + return n.initFields(n.slct.Fields) +} + +func (n *scanNode) initFields(fields []mapper.Requestable) error { + for _, r := range fields { + // add all the possible base level fields the fetcher is responsible + // for, including those that are needed by higher level aggregates + // or grouping alls, which themselves might have further dependents + switch requestable := r.(type) { + // field is simple as it's just a base level field + case *mapper.Field: + n.tryAddField(requestable.GetName()) + // select might have its own select fields and filter fields + case *mapper.Select: + n.tryAddField(requestable.Field.Name + "_id") // foreign key for type joins + err := n.initFields(requestable.Fields) + if err != nil { + return err + } + // aggregate might have its own target fields and filter fields + case *mapper.Aggregate: + for _, target := range requestable.AggregateTargets { + if target.Filter != nil { + fieldDescs, err := parser.ParseFilterFieldsForDescription( + target.Filter.ExternalConditions, + n.desc.Schema, + ) + if err != nil { + return err + } + for _, fd := range fieldDescs { + n.tryAddField(fd.Name) + } + } + if target.ChildTarget.HasValue { + n.tryAddField(target.ChildTarget.Name) + } else { + n.tryAddField(target.Field.Name) + } + } + } + } return nil } +func (n *scanNode) tryAddField(fieldName string) bool { + fd, ok := n.desc.Schema.GetField(fieldName) + if !ok { + // skip fields that are not part of the + // schema description. The scanner (and fetcher) + // is only responsible for basic fields + return false + } + n.fields = append(n.fields, fd) + return true +} + // Start starts the internal logic of the scanner // like the DocumentFetcher, and more.
func (n *scanNode) Start() error { @@ -84,12 +146,11 @@ func (n *scanNode) initScan() error { n.spans = core.NewSpans(core.NewSpan(start, start.PrefixEnd())) } - err := n.fetcher.Start(n.p.ctx, n.p.txn, n.spans) + err := n.fetcher.Start(n.p.ctx, n.spans) if err != nil { return err } - n.scanInitialized = true return nil } @@ -103,32 +164,25 @@ func (n *scanNode) Next() (bool, error) { return false, nil } - // keep scanning until we find a doc that passes the filter - for { - var err error - n.docKey, n.currentValue, err = n.fetcher.FetchNextDoc(n.p.ctx, n.documentMapping) - if err != nil { - return false, err - } - n.execInfo.docFetches++ + var err error + var execInfo fetcher.ExecInfo + n.docKey, n.currentValue, execInfo, err = n.fetcher.FetchNextDoc(n.p.ctx, n.documentMapping) + if err != nil { + return false, err + } + n.execInfo.fetches.Add(execInfo) - if len(n.currentValue.Fields) == 0 { - return false, nil - } - n.documentMapping.SetFirstOfName( - &n.currentValue, - request.DeletedFieldName, - n.currentValue.Status.IsDeleted(), - ) - passed, err := mapper.RunFilter(n.currentValue, n.filter) - if err != nil { - return false, err - } - if passed { - n.execInfo.filterMatches++ - return true, nil - } + if len(n.currentValue.Fields) == 0 { + return false, nil } + + n.documentMapping.SetFirstOfName( + &n.currentValue, + request.DeletedFieldName, + n.currentValue.Status.IsDeleted(), + ) + + return true, nil } func (n *scanNode) Spans(spans core.Spans) { @@ -160,10 +214,10 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } // Add the collection attributes. 
@@ -176,11 +230,11 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { return simpleExplainMap, nil } -func (n *scanNode) excuteExplain() map[string]any { +func (n *scanNode) executeExplain() map[string]any { return map[string]any{ - "iterations": n.execInfo.iterations, - "docFetches": n.execInfo.docFetches, - "filterMatches": n.execInfo.filterMatches, + "iterations": n.execInfo.iterations, + "docFetches": n.execInfo.fetches.DocsFetched, + "fieldFetches": n.execInfo.fetches.FieldsFetched, } } @@ -192,7 +246,7 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err return n.simpleExplain() case request.ExecuteExplain: - return n.excuteExplain(), nil + return n.executeExplain(), nil default: return nil, ErrUnknownExplainRequestType @@ -202,18 +256,30 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err // Merge implements mergeNode func (n *scanNode) Merge() bool { return true } -func (p *Planner) Scan(parsed *mapper.Select) *scanNode { +func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { var f fetcher.Fetcher if parsed.Cid.HasValue() { f = new(fetcher.VersionedFetcher) } else { f = new(fetcher.DocumentFetcher) + f = lens.NewFetcher(f, p.db.LensRegistry()) } - return &scanNode{ + scan := &scanNode{ p: p, fetcher: f, - docMapper: docMapper{&parsed.DocumentMapping}, + slct: parsed, + docMapper: docMapper{parsed.DocumentMapping}, + } + + colDesc, err := p.getCollectionDesc(parsed.CollectionName) + if err != nil { + return nil, err + } + err = scan.initCollection(colDesc) + if err != nil { + return nil, err } + return scan, nil } // multiScanNode is a buffered scanNode that has diff --git a/planner/select.go b/planner/select.go index a1d86bcddc..4fb9b143f2 100644 --- a/planner/select.go +++ b/planner/select.go @@ -115,7 +115,7 @@ type selectNode struct { // are defined in the subtype scan node. filter *mapper.Filter - docKeys immutable.Option[[]string] + keys immutable.Option[[]string] selectReq *mapper.Select groupSelects []*mapper.Select @@ -167,9 +167,9 @@ func (n *selectNode) Next() (bool, error) { n.execInfo.filterMatches++ - if n.docKeys.HasValue() { + if n.keys.HasValue() { docKey := n.currentValue.GetKey() - for _, key := range n.docKeys.Value() { + for _, key := range n.keys.Value() { if docKey == key { return true, nil } @@ -194,10 +194,17 @@ func (n *selectNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) + } + + // Add the keys attribute if it exists. 
+ if !n.keys.HasValue() { + simpleExplainMap[keysLabel] = nil + } else { + simpleExplainMap[keysLabel] = n.keys.Value() } return simpleExplainMap, nil @@ -359,7 +366,6 @@ func (n *selectNode) addTypeIndexJoin(subSelect *mapper.Select) error { if err != nil { return err } - if err := n.addSubPlan(subSelect.Index, typeIndexJoin); err != nil { return err } @@ -405,9 +411,9 @@ func (p *Planner) SelectFromSource( source: source, origSource: source, selectReq: selectReq, - docMapper: docMapper{&selectReq.DocumentMapping}, + docMapper: docMapper{selectReq.DocumentMapping}, filter: selectReq.Filter, - docKeys: selectReq.DocKeys, + keys: selectReq.DocKeys, } limit := selectReq.Limit orderBy := selectReq.OrderBy @@ -452,7 +458,7 @@ func (p *Planner) SelectFromSource( order: orderPlan, group: groupPlan, aggregates: aggregates, - docMapper: docMapper{&selectReq.DocumentMapping}, + docMapper: docMapper{selectReq.DocumentMapping}, } return top, nil } @@ -462,9 +468,9 @@ func (p *Planner) Select(selectReq *mapper.Select) (planNode, error) { s := &selectNode{ planner: p, filter: selectReq.Filter, - docKeys: selectReq.DocKeys, + keys: selectReq.DocKeys, selectReq: selectReq, - docMapper: docMapper{&selectReq.DocumentMapping}, + docMapper: docMapper{selectReq.DocumentMapping}, } limit := selectReq.Limit orderBy := selectReq.OrderBy @@ -496,7 +502,7 @@ func (p *Planner) Select(selectReq *mapper.Select) (planNode, error) { order: orderPlan, group: groupPlan, aggregates: aggregates, - docMapper: docMapper{&selectReq.DocumentMapping}, + docMapper: docMapper{selectReq.DocumentMapping}, } return top, nil } diff --git a/planner/sum.go b/planner/sum.go index 7bb14f2501..0e1690898e 100644 --- a/planner/sum.go +++ b/planner/sum.go @@ -61,7 +61,7 @@ func (p *Planner) Sum( isFloat: isFloat, aggregateMapping: field.AggregateTargets, virtualFieldIndex: field.Index, - docMapper: docMapper{&field.DocumentMapping}, + docMapper: docMapper{field.DocumentMapping}, }, nil } @@ -82,7 +82,7 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := parentDescription.GetField(source.Name) + fieldDescription, fieldDescriptionFound := parentDescription.Schema.GetField(source.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.Name) } @@ -130,7 +130,7 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := childCollectionDescription.GetField(source.ChildTarget.Name) + fieldDescription, fieldDescriptionFound := childCollectionDescription.Schema.GetField(source.ChildTarget.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.ChildTarget.Name) } @@ -163,10 +163,19 @@ func (n *sumNode) simpleExplain() (map[string]any, error) { simpleExplainMap := map[string]any{} // Add the filter attribute if it exists. - if source.Filter == nil || source.Filter.ExternalConditions == nil { + if source.Filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = source.Filter.ExternalConditions + // get the target aggregate document mapping. Since the filters + // are relative to the target aggregate collection (and doc mapper). + var targetMap *core.DocumentMapping + if source.Index < len(n.documentMapping.ChildMappings) && + n.documentMapping.ChildMappings[source.Index] != nil { + targetMap = n.documentMapping.ChildMappings[source.Index] + } else { + targetMap = n.documentMapping + } + simpleExplainMap[filterLabel] = source.Filter.ToMap(targetMap) } // Add the main field name. 
diff --git a/planner/top.go b/planner/top.go index 1f7764e091..93e530b2fc 100644 --- a/planner/top.go +++ b/planner/top.go @@ -186,7 +186,7 @@ func (n *topLevelNode) Next() (bool, error) { // Top creates a new topLevelNode using the given Select. func (p *Planner) Top(m *mapper.Select) (*topLevelNode, error) { node := topLevelNode{ - docMapper: docMapper{&m.DocumentMapping}, + docMapper: docMapper{m.DocumentMapping}, } aggregateChildren := []planNode{} @@ -209,6 +209,7 @@ func (p *Planner) Top(m *mapper.Select) (*topLevelNode, error) { } aggregateChildren = append(aggregateChildren, child) aggregateChildIndexes = append(aggregateChildIndexes, field.GetIndex()) + case *mapper.Select: child, err := p.Select(f) if err != nil { diff --git a/planner/type_join.go b/planner/type_join.go index b8dda2ebd2..1bab12b60f 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -79,7 +79,7 @@ func (p *Planner) makeTypeIndexJoin( var err error desc := parent.sourceInfo.collectionDescription - typeFieldDesc, ok := desc.GetField(subType.Name) + typeFieldDesc, ok := desc.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -138,7 +138,6 @@ func (n *typeIndexJoin) simpleExplain() (map[string]any, error) { joinDirectionLabel = "direction" joinDirectionPrimaryLabel = "primary" joinDirectionSecondaryLabel = "secondary" - joinSubTypeLabel = "subType" joinSubTypeNameLabel = "subTypeName" joinRootLabel = "rootName" ) @@ -228,12 +227,17 @@ func splitFilterByType(filter *mapper.Filter, subType int) (*mapper.Filter, *map keyFound, sub := removeConditionIndex(conditionKey, filter.Conditions) if !keyFound { - return filter, &mapper.Filter{} + return filter, nil } // create new splitup filter // our schema ensures that if sub exists, its of type map[string]any splitF := &mapper.Filter{Conditions: map[connor.FilterKey]any{conditionKey: sub}} + + // check if we have any remaining filters + if len(filter.Conditions) == 0 { + return nil, splitF + } return filter, splitF } @@ -264,7 +268,15 @@ func (p *Planner) makeTypeJoinOne( ) (*typeJoinOne, error) { // split filter if scan, ok := source.(*scanNode); ok { - scan.filter, parent.filter = splitFilterByType(scan.filter, subType.Index) + var parentfilter *mapper.Filter + scan.filter, parentfilter = splitFilterByType(scan.filter, subType.Index) + if parentfilter != nil { + if parent.filter == nil { + parent.filter = new(mapper.Filter) + } + parent.filter.Conditions = mergeFilterConditions( + parent.filter.Conditions, parentfilter.Conditions) + } subType.ShowDeleted = parent.selectReq.ShowDeleted } @@ -274,7 +286,7 @@ func (p *Planner) makeTypeJoinOne( } // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.GetField(subType.Name) + subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -335,50 +347,56 @@ func (n *typeJoinOne) Next() (bool, error) { doc := n.root.Value() if n.primary { - n.currentValue = n.valuesPrimary(doc) + n.currentValue, err = n.valuesPrimary(doc) } else { - n.currentValue = n.valuesSecondary(doc) + n.currentValue, err = n.valuesSecondary(doc) + } + + if err != nil { + return false, err } + return true, nil } -func (n *typeJoinOne) valuesSecondary(doc core.Doc) core.Doc { +func (n *typeJoinOne) valuesSecondary(doc core.Doc) (core.Doc, error) { fkIndex := &mapper.PropertyIndex{ - Index: 
n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + "_id"), + Index: n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + request.RelatedObjectID), } filter := map[connor.FilterKey]any{ - fkIndex: doc.GetKey(), - } - - // We have to reset the scan node after appending the new key-filter - if err := n.subType.Init(); err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + fkIndex: map[connor.FilterKey]any{ + mapper.FilterEqOp: doc.GetKey(), + }, } // using the doc._key as a filter err := appendFilterToScanNode(n.subType, filter) if err != nil { - return core.Doc{} + return core.Doc{}, err + } + + // We have to reset the scan node after appending the new key-filter + if err := n.subType.Init(); err != nil { + return doc, NewErrSubTypeInit(err) } next, err := n.subType.Next() if !next || err != nil { - return doc + return doc, err } subdoc := n.subType.Value() doc.Fields[n.subSelect.Index] = subdoc - return doc + return doc, nil } -func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { +func (n *typeJoinOne) valuesPrimary(doc core.Doc) (core.Doc, error) { // get the subtype doc key - subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+"_id") + subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+request.RelatedObjectID) subDocKeyStr, ok := subDocKey.(string) if !ok { - return doc + return doc, nil } // create the collection key for the sub doc @@ -394,8 +412,7 @@ func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { // re-initialize the sub type plan if err := n.subType.Init(); err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + return doc, NewErrSubTypeInit(err) } // if we don't find any docs from our point span lookup @@ -404,18 +421,17 @@ func (n *typeJoinOne) valuesPrimary(doc core.Doc) core.Doc { next, err := n.subType.Next() if err != nil { - log.ErrorE(n.p.ctx, "Sub-type initialization error at scan node reset", err) - return doc + return doc, err } if !next { - return doc + return doc, nil } subDoc := n.subType.Value() doc.Fields[n.subSelect.Index] = subDoc - return doc + return doc, nil } func (n *typeJoinOne) Close() error { @@ -453,7 +469,15 @@ func (p *Planner) makeTypeJoinMany( ) (*typeJoinMany, error) { // split filter if scan, ok := source.(*scanNode); ok { - scan.filter, parent.filter = splitFilterByType(scan.filter, subType.Index) + var parentfilter *mapper.Filter + scan.filter, parentfilter = splitFilterByType(scan.filter, subType.Index) + if parentfilter != nil { + if parent.filter == nil { + parent.filter = new(mapper.Filter) + } + parent.filter.Conditions = mergeFilterConditions( + parent.filter.Conditions, parentfilter.Conditions) + } subType.ShowDeleted = parent.selectReq.ShowDeleted } @@ -462,7 +486,7 @@ func (p *Planner) makeTypeJoinMany( return nil, err } - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.GetField(subType.Name) + subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -526,11 +550,14 @@ func (n *typeJoinMany) Next() (bool, error) { // @todo: handle index for one-to-many setup } else { fkIndex := &mapper.PropertyIndex{ - Index: n.subSelect.FirstIndexOfName(n.rootName + "_id"), + Index: n.subSelect.FirstIndexOfName(n.rootName + request.RelatedObjectID), } filter := map[connor.FilterKey]any{ - fkIndex: n.currentValue.GetKey(), // user_id: "bae-ALICE" | user_id: 
"bae-CHARLIE" + fkIndex: map[connor.FilterKey]any{ + mapper.FilterEqOp: n.currentValue.GetKey(), + }, } + // using the doc._key as a filter err := appendFilterToScanNode(n.subType, filter) if err != nil { @@ -574,19 +601,11 @@ func appendFilterToScanNode(plan planNode, filterCondition map[connor.FilterKey] switch node := plan.(type) { case *scanNode: filter := node.filter - if filter == nil { + if filter == nil && len(filterCondition) > 0 { filter = mapper.NewFilter() } - // merge filter conditions - for k, v := range filterCondition { - indexKey, isIndexKey := k.(*mapper.PropertyIndex) - if !isIndexKey { - continue - } - removeConditionIndex(indexKey, filter.Conditions) - filter.Conditions[k] = v - } + filter.Conditions = mergeFilterConditions(filter.Conditions, filterCondition) node.filter = filter case nil: @@ -597,6 +616,23 @@ func appendFilterToScanNode(plan planNode, filterCondition map[connor.FilterKey] return nil } +// merge into dest with src, return dest +func mergeFilterConditions(dest map[connor.FilterKey]any, src map[connor.FilterKey]any) map[connor.FilterKey]any { + if dest == nil { + dest = make(map[connor.FilterKey]any) + } + // merge filter conditions + for k, v := range src { + indexKey, isIndexKey := k.(*mapper.PropertyIndex) + if !isIndexKey { + continue + } + removeConditionIndex(indexKey, dest) + dest[k] = v + } + return dest +} + func removeConditionIndex( key *mapper.PropertyIndex, filterConditions map[connor.FilterKey]any, diff --git a/planner/update.go b/planner/update.go index c13663ad77..36b5487c5e 100644 --- a/planner/update.go +++ b/planner/update.go @@ -118,10 +118,10 @@ func (n *updateNode) simpleExplain() (map[string]any, error) { simpleExplainMap[idsLabel] = n.ids // Add the filter attribute if it exists, otherwise have it nil. - if n.filter == nil || n.filter.ExternalConditions == nil { + if n.filter == nil { simpleExplainMap[filterLabel] = nil } else { - simpleExplainMap[filterLabel] = n.filter.ExternalConditions + simpleExplainMap[filterLabel] = n.filter.ToMap(n.documentMapping) } // Add the attribute that represents the patch to update with. @@ -160,7 +160,7 @@ func (p *Planner) UpdateDocs(parsed *mapper.Mutation) (planNode, error) { ids: parsed.DocKeys.Value(), isUpdating: true, patch: parsed.Data, - docMapper: docMapper{&parsed.DocumentMapping}, + docMapper: docMapper{parsed.DocumentMapping}, } // get collection diff --git a/playground/.gitignore b/playground/.gitignore new file mode 100644 index 0000000000..a547bf36d8 --- /dev/null +++ b/playground/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/playground/README.md b/playground/README.md new file mode 100644 index 0000000000..3018611396 --- /dev/null +++ b/playground/README.md @@ -0,0 +1,27 @@ +# DefraDB Playground + +A web based playground for DefraDB. + +## Developing + +Run a development server bound to `localhost:5173`. + +```bash +npm install +npm run dev +``` + +Start DefraDB with CORS allowed. + +```bash +defradb start --allowed-origins="*" +``` + +## Building + +Create a static build and output files to `./dist`. 
+ +```bash +npm install +npm run build +``` diff --git a/playground/index.html b/playground/index.html new file mode 100644 index 0000000000..ab81ce5bf4 --- /dev/null +++ b/playground/index.html @@ -0,0 +1,13 @@ + + + + + + + DefraDB Playground + + + + + + diff --git a/playground/package-lock.json b/playground/package-lock.json new file mode 100644 index 0000000000..9eb027211f --- /dev/null +++ b/playground/package-lock.json @@ -0,0 +1,3464 @@ +{ + "name": "playground", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "playground", + "version": "0.0.0", + "dependencies": { + "@tanstack/react-query": "^4.32.0", + "fast-json-patch": "^3.1.1", + "graphiql": "^3.0.4", + "graphql": "^16.7.1", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-hook-form": "^7.45.2" + }, + "devDependencies": { + "@types/react": "^18.2.15", + "@types/react-dom": "^18.2.7", + "@typescript-eslint/eslint-plugin": "^5.59.0", + "@typescript-eslint/parser": "^5.62.0", + "@vitejs/plugin-react-swc": "^3.0.0", + "eslint": "^8.45.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.3.4", + "typescript": "^5.0.2", + "vite": "^4.3.9" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", + "integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", + "dependencies": { + "regenerator-runtime": "^0.13.11" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@codemirror/language": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.0.0.tgz", + "integrity": "sha512-rtjk5ifyMzOna1c7PBu7J1VCt0PvA5wy3o8eMVnxMKb7z8KA7JFecvD04dSn14vj/bBaAbqRsGed5OjtofEnLA==", + "peer": true, + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.0.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/state": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", + "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==", + "peer": true + }, + "node_modules/@codemirror/view": { + "version": "6.15.3", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.15.3.tgz", + "integrity": "sha512-chNgR8H7Ipx7AZUt0+Kknk7BCow/ron3mHd1VZdM7hQXiI79+UlWqcxpCiexTxZQ+iSkqndk3HHAclJOcjSuog==", + "peer": true, + "dependencies": { + "@codemirror/state": "^6.1.4", + "style-mod": "^4.0.0", + "w3c-keyname": "^2.2.4" + } + }, + "node_modules/@emotion/is-prop-valid": { + "version": "0.8.8", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-0.8.8.tgz", + "integrity": "sha512-u5WtneEAr5IDG2Wv65yhunPSMLIpuKsbuOktRojfrEiEvRyC85LgPMZI63cr7NUqT8ZIGdSVg8ZKGxIug4lXcA==", + "optional": true, + "dependencies": { + "@emotion/memoize": "0.7.4" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.7.4.tgz", + "integrity": 
"sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw==", + "optional": true + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz", + "integrity": "sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.1.tgz", + "integrity": "sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.0.tgz", + "integrity": "sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.44.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.44.0.tgz", + "integrity": "sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.3.1.tgz", + "integrity": "sha512-Bu+AMaXNjrpjh41znzHqaz3r2Nr8hHuHZT6V2LBKMhyMl0FgKA62PNYbqnfgmzOhoWZj70Zecisbo4H1rotP5g==" + }, + "node_modules/@floating-ui/dom": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.4.5.tgz", + "integrity": "sha512-96KnRWkRnuBSSFbj0sFGwwOUd8EkiecINVl0O9wiZlZ64EkpyAOG3Xc2vKKNJmru0Z7RqWNymA+6b8OZqjgyyw==", + "dependencies": { + "@floating-ui/core": "^1.3.1" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.1.tgz", + "integrity": "sha512-rZtAmSht4Lry6gdhAJDrCp/6rKN7++JnL1/Anbr/DdeyYXQPxvg/ivrbYvJulbRf4vL8b212suwMM2lxbv+RQA==", + "dependencies": { + "@floating-ui/dom": "^1.3.0" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@graphiql/react": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.19.2.tgz", + "integrity": 
"sha512-xdcLLUHr15AUxtv9Jyw7Mlf6Vd9EJb8ULImHTJOzDgW7DNAGUdU6Yu7xTGP/eCx+RrOQON1Bschv8Mjxk56tYg==", + "dependencies": { + "@graphiql/toolkit": "^0.9.1", + "@headlessui/react": "^1.7.15", + "@radix-ui/react-dialog": "^1.0.4", + "@radix-ui/react-dropdown-menu": "^2.0.5", + "@radix-ui/react-tooltip": "^1.0.6", + "@radix-ui/react-visually-hidden": "^1.0.3", + "@types/codemirror": "^5.60.8", + "clsx": "^1.2.1", + "codemirror": "^5.65.3", + "codemirror-graphql": "^2.0.9", + "copy-to-clipboard": "^3.2.0", + "framer-motion": "^6.5.1", + "graphql-language-service": "^5.1.7", + "markdown-it": "^12.2.0", + "set-value": "^4.1.0" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "react": "^16.8.0 || ^17 || ^18", + "react-dom": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/@graphiql/toolkit": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/@graphiql/toolkit/-/toolkit-0.9.1.tgz", + "integrity": "sha512-LVt9pdk0830so50ZnU2Znb2rclcoWznG8r8asqAENzV0U1FM1kuY0sdPpc/rBc9MmmNgnB6A+WZzDhq6dbhTHA==", + "dependencies": { + "@n1ru4l/push-pull-async-iterable-iterator": "^3.1.0", + "meros": "^1.1.4" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "graphql-ws": ">= 4.5.0" + }, + "peerDependenciesMeta": { + "graphql-ws": { + "optional": true + } + } + }, + "node_modules/@headlessui/react": { + "version": "1.7.15", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.15.tgz", + "integrity": "sha512-OTO0XtoRQ6JPB1cKNFYBZv2Q0JMqMGNhYP1CjPvcJvjz8YGokz8oAj89HIYZGN0gZzn/4kk9iUpmMF4Q21Gsqw==", + "dependencies": { + "client-only": "^0.0.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18", + "react-dom": "^16 || ^17 || ^18" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz", + "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "dev": true + }, + "node_modules/@lezer/common": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", + "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==", + "peer": true + }, + "node_modules/@lezer/highlight": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.1.6.tgz", + "integrity": "sha512-cmSJYa2us+r3SePpRCjN5ymCqCPv+zyXmDl0ciWtVaNiORT/MxM7ZgOMQZADD0o51qOaOg24qc/zBViOIwAjJg==", + "peer": true, + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + 
"node_modules/@lezer/lr": { + "version": "1.3.9", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.9.tgz", + "integrity": "sha512-XPz6dzuTHlnsbA5M2DZgjflNQ+9Hi5Swhic0RULdp3oOs3rh6bqGZolosVqN/fQIT8uNiepzINJDnS39oweTHQ==", + "peer": true, + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@motionone/animation": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.15.1.tgz", + "integrity": "sha512-mZcJxLjHor+bhcPuIFErMDNyrdb2vJur8lSfMCsuCB4UyV8ILZLvK+t+pg56erv8ud9xQGK/1OGPt10agPrCyQ==", + "dependencies": { + "@motionone/easing": "^10.15.1", + "@motionone/types": "^10.15.1", + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/animation/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/dom": { + "version": "10.12.0", + "resolved": "https://registry.npmjs.org/@motionone/dom/-/dom-10.12.0.tgz", + "integrity": "sha512-UdPTtLMAktHiqV0atOczNYyDd/d8Cf5fFsd1tua03PqTwwCe/6lwhLSQ8a7TbnQ5SN0gm44N1slBfj+ORIhrqw==", + "dependencies": { + "@motionone/animation": "^10.12.0", + "@motionone/generators": "^10.12.0", + "@motionone/types": "^10.12.0", + "@motionone/utils": "^10.12.0", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/dom/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/easing": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.15.1.tgz", + "integrity": "sha512-6hIHBSV+ZVehf9dcKZLT7p5PEKHGhDwky2k8RKkmOvUoYP3S+dXsKupyZpqx5apjd9f+php4vXk4LuS+ADsrWw==", + "dependencies": { + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/easing/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/generators": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.15.1.tgz", + "integrity": "sha512-67HLsvHJbw6cIbLA/o+gsm7h+6D4Sn7AUrB/GPxvujse1cGZ38F5H7DzoH7PhX+sjvtDnt2IhFYF2Zp1QTMKWQ==", + "dependencies": { + "@motionone/types": "^10.15.1", + "@motionone/utils": "^10.15.1", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/generators/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@motionone/types": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.15.1.tgz", + "integrity": "sha512-iIUd/EgUsRZGrvW0jqdst8st7zKTzS9EsKkP+6c6n4MPZoQHwiHuVtTQLD6Kp0bsBLhNzKIBlHXponn/SDT4hA==" + }, + "node_modules/@motionone/utils": { + "version": "10.15.1", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.15.1.tgz", + "integrity": "sha512-p0YncgU+iklvYr/Dq4NobTRdAPv9PveRDUXabPEeOjBLSO/1FNB2phNTZxOxpi1/GZwYpAoECEa0Wam+nsmhSw==", + "dependencies": { + "@motionone/types": 
"^10.15.1", + "hey-listen": "^1.0.8", + "tslib": "^2.3.1" + } + }, + "node_modules/@motionone/utils/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/@n1ru4l/push-pull-async-iterable-iterator": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@n1ru4l/push-pull-async-iterable-iterator/-/push-pull-async-iterable-iterator-3.2.0.tgz", + "integrity": "sha512-3fkKj25kEjsfObL6IlKPAlHYPq/oYwUkkQ03zsTTiDjD7vg/RxjdiLeCydqtxHZP0JgsXL3D/X5oAkMGzuUp/Q==", + "engines": { + "node": ">=12" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@radix-ui/primitive": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.0.1.tgz", + "integrity": "sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@radix-ui/react-arrow": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.0.3.tgz", + "integrity": "sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-collection": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.0.3.tgz", + "integrity": "sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + 
}, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.0.1.tgz", + "integrity": "sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-context": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.0.1.tgz", + "integrity": "sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.4.tgz", + "integrity": "sha512-hJtRy/jPULGQZceSAP2Re6/4NpKo8im6V8P2hUqZsdFiSL8l35kYsw3qbRI6Ay5mQd2+wlLqje770eq+RJ3yZg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-direction": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.0.1.tgz", + "integrity": "sha512-RXcvnXgyvYvBEOhCBuddKecVkoMiI10Jcm5cTI7abJRAHYfFxeu+FBQs/DvdxSYucxR5mna0dNsL6QFlds5TMA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", + "integrity": "sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-escape-keydown": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-dropdown-menu": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.0.5.tgz", + "integrity": "sha512-xdOrZzOTocqqkCkYo8yRPCib5OkTkqN7lqNCdxwPOdE466DOaNl4N8PkUIlsXthQvW5Wwkd+aEmWpfWlBoDPEw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-menu": "2.0.5", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-guards": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.0.1.tgz", + "integrity": "sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-focus-scope": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.3.tgz", + "integrity": "sha512-upXdPfqI4islj2CslyfUBNlaJCPybbqRHAi1KER7Isel9Q2AtSJ0zRBZv8mWQiFXD2nyAJ4BhC3yXgZ6kMBSrQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-id": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.0.1.tgz", + "integrity": "sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-menu": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.0.5.tgz", + "integrity": "sha512-Gw4f9pwdH+w5w+49k0gLjN0PfRDHvxmAgG16AbyJZ7zhwZ6PBHKtWohvnSwfusfnK3L68dpBREHpVkj8wEM7ZA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-focus-guards": "1.0.1", + "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + 
"@radix-ui/react-roving-focus": "1.0.4", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-callback-ref": "1.0.1", + "aria-hidden": "^1.1.1", + "react-remove-scroll": "2.5.5" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-popper": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", + "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1", + "@radix-ui/react-use-rect": "1.0.1", + "@radix-ui/react-use-size": "1.0.1", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-portal": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", + "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-presence": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.0.1.tgz", + "integrity": "sha512-UXLW4UAbIY5ZjcvzjfRFo5gxva8QirC9hF7wRE4U5gz+TP0DbRk+//qyuAQ1McDxBt1xNMBTaciFGvEmJvAZCg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-primitive": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-1.0.3.tgz", + "integrity": "sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-slot": "1.0.2" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-roving-focus": { + 
"version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.0.4.tgz", + "integrity": "sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-collection": "1.0.3", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-direction": "1.0.1", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-use-callback-ref": "1.0.1", + "@radix-ui/react-use-controllable-state": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.0.2.tgz", + "integrity": "sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-compose-refs": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-tooltip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.6.tgz", + "integrity": "sha512-DmNFOiwEc2UDigsYj6clJENma58OelxD24O4IODoZ+3sQc3Zb+L8w1EP+y9laTuKCLAysPw4fD6/v0j4KNV8rg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/primitive": "1.0.1", + "@radix-ui/react-compose-refs": "1.0.1", + "@radix-ui/react-context": "1.0.1", + "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-id": "1.0.1", + "@radix-ui/react-popper": "1.1.2", + "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-presence": "1.0.1", + "@radix-ui/react-primitive": "1.0.3", + "@radix-ui/react-slot": "1.0.2", + "@radix-ui/react-use-controllable-state": "1.0.1", + "@radix-ui/react-visually-hidden": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.0.1.tgz", + "integrity": "sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.0.1.tgz", + "integrity": "sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + 
"@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.0.3.tgz", + "integrity": "sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-callback-ref": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.0.1.tgz", + "integrity": "sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-rect/-/react-use-rect-1.0.1.tgz", + "integrity": "sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/rect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-size": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-size/-/react-use-size-1.0.1.tgz", + "integrity": "sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-use-layout-effect": "1.0.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-visually-hidden": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.0.3.tgz", + "integrity": "sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==", + "dependencies": { + "@babel/runtime": "^7.13.10", + "@radix-ui/react-primitive": "1.0.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/rect": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@radix-ui/rect/-/rect-1.0.1.tgz", + "integrity": "sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==", + "dependencies": { + "@babel/runtime": "^7.13.10" + } + }, + "node_modules/@swc/core": { + "version": "1.3.62", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.62.tgz", + "integrity": 
"sha512-J58hWY+/G8vOr4J6ZH9hLg0lMSijZtqIIf4HofZezGog/pVX6sJyBJ40dZ1ploFkDIlWTWvJyqtpesBKS73gkQ==", + "dev": true, + "hasInstallScript": true, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.3.62", + "@swc/core-darwin-x64": "1.3.62", + "@swc/core-linux-arm-gnueabihf": "1.3.62", + "@swc/core-linux-arm64-gnu": "1.3.62", + "@swc/core-linux-arm64-musl": "1.3.62", + "@swc/core-linux-x64-gnu": "1.3.62", + "@swc/core-linux-x64-musl": "1.3.62", + "@swc/core-win32-arm64-msvc": "1.3.62", + "@swc/core-win32-ia32-msvc": "1.3.62", + "@swc/core-win32-x64-msvc": "1.3.62" + }, + "peerDependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependenciesMeta": { + "@swc/helpers": { + "optional": true + } + } + }, + "node_modules/@swc/core-darwin-arm64": { + "version": "1.3.62", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.62.tgz", + "integrity": "sha512-MmGilibITz68LEje6vJlKzc2gUUSgzvB3wGLSjEORikTNeM7P8jXVxE4A8fgZqDeudJUm9HVWrxCV+pHDSwXhA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@tanstack/query-core": { + "version": "4.32.0", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.32.0.tgz", + "integrity": "sha512-ei4IYwL2kmlKSlCw9WgvV7PpXi0MiswVwfQRxawhJA690zWO3dU49igaQ/UMTl+Jy9jj9dK5IKAYvbX7kUvviQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "4.32.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.32.0.tgz", + "integrity": "sha512-B8WUMcByYAH9500ENejDCATOmEZhqjtS9wsfiQ3BNa+s+yAynY8SESI8WWHhSqUmjd0pmCSFRP6BOUGSda3QXA==", + "dependencies": { + "@tanstack/query-core": "4.32.0", + "use-sync-external-store": "^1.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-native": "*" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, + "node_modules/@types/codemirror": { + "version": "5.60.8", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.8.tgz", + "integrity": "sha512-VjFgDF/eB+Aklcy15TtOTLQeMjTo07k7KAjql8OK5Dirr7a6sJY4T1uVBDuTVG9VEmn1uUsohOpYnVfgC6/jyw==", + "dependencies": { + "@types/tern": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", + "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + }, + "node_modules/@types/json-schema": { + "version": "7.0.12", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", + "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", + "dev": true + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", + "devOptional": true + }, + "node_modules/@types/react": { + "version": "18.2.15", + "resolved": 
"https://registry.npmjs.org/@types/react/-/react-18.2.15.tgz", + "integrity": "sha512-oEjE7TQt1fFTFSbf8kkNuc798ahTUzn3Le67/PWjE8MAfYAD/qB7O8hSTcromLFqHCt9bcdOg5GXMokzTjJ5SA==", + "devOptional": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.7.tgz", + "integrity": "sha512-GRaAEriuT4zp9N4p1i8BDBYmEyfo+xQ3yHjJU4eiK5NDa1RmUZG+unZABUTK4/Ox/M+GaHwb6Ow8rUITrtjszA==", + "devOptional": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", + "devOptional": true + }, + "node_modules/@types/semver": { + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.0.tgz", + "integrity": "sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==", + "dev": true + }, + "node_modules/@types/tern": { + "version": "0.23.4", + "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.4.tgz", + "integrity": "sha512-JAUw1iXGO1qaWwEOzxTKJZ/5JxVeON9kvGZ/osgZaJImBnyjyn0cjovPsf6FNLmyGY8Vw9DoXZCMlfMkMwHRWg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.59.11.tgz", + "integrity": "sha512-XxuOfTkCUiOSyBWIvHlUraLw/JT/6Io1365RO6ZuI88STKMavJZPNMU0lFcUTeQXEhHiv64CbxYxBNoDVSmghg==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.59.11", + "@typescript-eslint/type-utils": "5.59.11", + "@typescript-eslint/utils": "5.59.11", + "debug": "^4.3.4", + "grapheme-splitter": "^1.0.4", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": 
"sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.62.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.59.11.tgz", + "integrity": "sha512-dHFOsxoLFtrIcSj5h0QoBT/89hxQONwmn3FOQ0GOQcLOOXm+MIrS8zEAhs4tWl5MraxCY3ZJpaXQQdFMc2Tu+Q==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/visitor-keys": "5.59.11" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.59.11.tgz", + "integrity": "sha512-LZqVY8hMiVRF2a7/swmkStMYSoXMFlzL6sXV6U/2gL5cwnLWQgLEG8tjWPpaE4rMIdZ6VKWwcffPlo1jPfk43g==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "5.59.11", + "@typescript-eslint/utils": "5.59.11", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.59.11.tgz", + "integrity": "sha512-epoN6R6tkvBYSc+cllrz+c2sOFWkbisJZWkOE+y3xHtvYaOE6Wk6B8e114McRJwFRjGvYdJwLXQH5c9osME/AA==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.59.11.tgz", + "integrity": "sha512-YupOpot5hJO0maupJXixi6l5ETdrITxeo5eBOeuV7RSKgYdU3G5cxO49/9WRnJq9EMrB7AuTSLH/bqOsXi7wPA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/visitor-keys": "5.59.11", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.59.11.tgz", + "integrity": "sha512-didu2rHSOMUdJThLk4aZ1Or8IcO3HzCw/ZvEjTTIfjIrcdd5cvSIwwDy2AOlE7htSNp7QIZ10fLMyRCveesMLg==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.59.11", + "@typescript-eslint/types": "5.59.11", + "@typescript-eslint/typescript-estree": "5.59.11", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.59.11", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.59.11.tgz", + "integrity": "sha512-KGYniTGG3AMTuKF9QBD7EIrvufkB6O6uX3knP73xbKLMpH+QRPcgnCxjWXSHjMRuOxFLovljqQgQpR0c7GvjoA==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.59.11", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@vitejs/plugin-react-swc": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.3.2.tgz", + "integrity": "sha512-VJFWY5sfoZerQRvJrh518h3AcQt6f/yTuWn4/TRB+dqmYU0NX1qz7qM5Wfd+gOQqUzQW4gxKqKN3KpE/P3+zrA==", + "dev": true, + "dependencies": { + "@swc/core": "^1.3.61" + }, + "peerDependencies": { + "vite": "^4" + } + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + 
"engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/aria-hidden": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz", + "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/aria-hidden/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": 
"sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", + "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" + }, + "node_modules/clsx": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", + "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/codemirror": { + "version": "5.65.14", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.14.tgz", + "integrity": "sha512-VSNugIBDGt0OU9gDjeVr6fNkoFQznrWEUdAApMlXQNbfE8gGO19776D6MwSqF/V/w/sDwonsQ0z7KmmI9guScg==" + }, + "node_modules/codemirror-graphql": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.9.tgz", + "integrity": "sha512-gl1LR6XSBgZtl7Dr2q4jjRNfhxMF8vn+rnjZTZPf/l+VrQgavY8l3G//hW7s3hWy73iiqkq5LZ4KE1tdaxB/vQ==", + "dependencies": { + "graphql-language-service": "5.1.7" + }, + "peerDependencies": { + "@codemirror/language": "6.0.0", + "codemirror": "^5.65.3", + "graphql": "^15.5.0 || ^16.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/copy-to-clipboard": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz", + "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==", + "dependencies": { + "toggle-selection": "^1.0.6" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": 
"sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", + "devOptional": true + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/entities": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/esbuild": { + "version": "0.17.19", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.19.tgz", + "integrity": "sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.17.19", + "@esbuild/android-arm64": "0.17.19", + "@esbuild/android-x64": "0.17.19", + "@esbuild/darwin-arm64": "0.17.19", + "@esbuild/darwin-x64": "0.17.19", + "@esbuild/freebsd-arm64": "0.17.19", + "@esbuild/freebsd-x64": "0.17.19", + "@esbuild/linux-arm": "0.17.19", + "@esbuild/linux-arm64": "0.17.19", + "@esbuild/linux-ia32": "0.17.19", + "@esbuild/linux-loong64": "0.17.19", + "@esbuild/linux-mips64el": "0.17.19", + "@esbuild/linux-ppc64": "0.17.19", + "@esbuild/linux-riscv64": "0.17.19", + "@esbuild/linux-s390x": "0.17.19", + "@esbuild/linux-x64": "0.17.19", + "@esbuild/netbsd-x64": "0.17.19", + "@esbuild/openbsd-x64": "0.17.19", + "@esbuild/sunos-x64": "0.17.19", + 
"@esbuild/win32-arm64": "0.17.19", + "@esbuild/win32-ia32": "0.17.19", + "@esbuild/win32-x64": "0.17.19" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.45.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.45.0.tgz", + "integrity": "sha512-pd8KSxiQpdYRfYa9Wufvdoct3ZPQQuVuU5O6scNgMuOMYuxvH0IGaYK0wUFjo4UYYQQCUndlXiMbnxopwvvTiw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.4.0", + "@eslint/eslintrc": "^2.1.0", + "@eslint/js": "8.44.0", + "@humanwhocodes/config-array": "^0.11.10", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "ajv": "^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.0", + "eslint-visitor-keys": "^3.4.1", + "espree": "^9.6.0", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", + "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "dev": true, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.3.5.tgz", + "integrity": "sha512-61qNIsc7fo9Pp/mju0J83kzvLm0Bsayu7OQSLEoJxLDCBjIIyb87bkzufoOvdDxLkSlMfkF7UxomC4+eztUBSA==", + "dev": true, + "peerDependencies": { + "eslint": ">=7" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz", + "integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==", + "dev": true, + "engines": { + "node": 
"^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.0.tgz", + "integrity": "sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.2.12", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", + "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-patch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/fast-json-patch/-/fast-json-patch-3.1.1.tgz", + "integrity": "sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "dev": true, 
+ "dependencies": { + "flatted": "^3.1.0", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", + "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "dev": true + }, + "node_modules/framer-motion": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-6.5.1.tgz", + "integrity": "sha512-o1BGqqposwi7cgDrtg0dNONhkmPsUFDaLcKXigzuTFC5x58mE8iyTazxSudFzmT6MEyJKfjjU8ItoMe3W+3fiw==", + "dependencies": { + "@motionone/dom": "10.12.0", + "framesync": "6.0.1", + "hey-listen": "^1.0.8", + "popmotion": "11.0.3", + "style-value-types": "5.0.0", + "tslib": "^2.1.0" + }, + "optionalDependencies": { + "@emotion/is-prop-valid": "^0.8.2" + }, + "peerDependencies": { + "react": ">=16.8 || ^17.0.0 || ^18.0.0", + "react-dom": ">=16.8 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/framer-motion/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/framesync": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/framesync/-/framesync-6.0.1.tgz", + "integrity": "sha512-fUY88kXvGiIItgNC7wcTOl0SNRCVXMKSWW2Yzfmn7EKNc+MpCzcz9DhdHcdjbrtN3c6R4H5dTY2jiCpPdysEjA==", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/framesync/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-nonce": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", + "integrity": "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + 
"dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.20.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", + "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/grapheme-splitter": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", + "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", + "dev": true + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/graphiql": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.4.tgz", + "integrity": "sha512-5NVEG1I8CkpHtZEauvHnU4yoVPjktTHiSMsxXCMwEB6OMkvSg71Fix1MtTc1k/8HnJUTomIDLodRAiRM3Hu+dQ==", + "dependencies": { + "@graphiql/react": "^0.19.2", + "@graphiql/toolkit": "^0.9.1", + "graphql-language-service": "^5.1.7", + "markdown-it": "^12.2.0" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0", + "react": "^16.8.0 || ^17 || ^18", + "react-dom": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/graphql": { + "version": "16.7.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.7.1.tgz", + "integrity": "sha512-DRYR9tf+UGU0KOsMcKAlXeFfX89UiiIZ0dRU3mR0yJfu6OjZqUcp68NnFLnqQU5RexygFoDy1EW+ccOYcPfmHg==", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/graphql-language-service": { + "version": "5.1.7", + "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.1.7.tgz", + "integrity": "sha512-xkawYMJeoNYGhT+SpSH3c2qf6HpGHQ/duDmrseVHBpVCrXAiGnliXGSCC4jyMGgZQ05GytsZ12p0nUo7s6lSSw==", + "dependencies": { + "nullthrows": "^1.0.0", + "vscode-languageserver-types": "^3.17.1" + }, + "bin": { + "graphql": "dist/temp-bin.js" + }, + "peerDependencies": { + "graphql": "^15.5.0 || ^16.0.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/hey-listen": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/hey-listen/-/hey-listen-1.0.8.tgz", + "integrity": "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q==" + }, + "node_modules/ignore": { + "version": "5.2.4", + "resolved": 
"https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-primitive": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/is-primitive/-/is-primitive-3.0.1.tgz", + "integrity": "sha512-GljRxhWvlCNRfZyORiH77FwdFwGcMO620o37EOYC0ORWdq+WYNVqW0w2Juzew4M+L81l6/QS3t5gkkihyRqv9w==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/linkify-it": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", + "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", + "dependencies": { + "uc.micro": "^1.0.1" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": 
"sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/markdown-it": { + "version": "12.3.2", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", + "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", + "dependencies": { + "argparse": "^2.0.1", + "entities": "~2.1.0", + "linkify-it": "^3.0.1", + "mdurl": "^1.0.1", + "uc.micro": "^1.0.5" + }, + "bin": { + "markdown-it": "bin/markdown-it.js" + } + }, + "node_modules/mdurl": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", + "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/meros": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/meros/-/meros-1.3.0.tgz", + "integrity": "sha512-2BNGOimxEz5hmjUG2FwoxCt5HN7BXdaWyFqEwxPTrJzVdABtrL4TiHTcsWSFAxPQ/tOnEaQEJh3qWq71QRMY+w==", + "engines": { + "node": ">=13" + }, + "peerDependencies": { + "@types/node": ">=13" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": 
"sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", + "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "dev": true + }, + "node_modules/nullthrows": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/nullthrows/-/nullthrows-1.1.1.tgz", + "integrity": "sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/popmotion": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/popmotion/-/popmotion-11.0.3.tgz", + "integrity": "sha512-Y55FLdj3UxkR7Vl3s7Qr4e9m0onSnP8W7d/xQLsoJM40vs6UKHFdygs6SWryasTZYqugMjm3BepCF4CWXDiHgA==", + "dependencies": { + "framesync": "6.0.1", + "hey-listen": "^1.0.8", + "style-value-types": "5.0.0", + "tslib": "^2.1.0" + } + }, + "node_modules/popmotion/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/postcss": { + "version": "8.4.24", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", + "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": 
"sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-hook-form": { + "version": "7.45.2", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.45.2.tgz", + "integrity": "sha512-9s45OdTaKN+4NSTbXVqeDITd/nwIg++nxJGL8+OD5uf1DxvhsXQ641kaYHk5K28cpIOTYm71O/fYk7rFaygb3A==", + "engines": { + "node": ">=12.22.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/react-hook-form" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17 || ^18" + } + }, + "node_modules/react-remove-scroll": { + "version": "2.5.5", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", + "integrity": "sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==", + "dependencies": { + "react-remove-scroll-bar": "^2.3.3", + "react-style-singleton": "^2.2.1", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.0", + "use-sidecar": "^1.1.2" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz", + "integrity": "sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==", + "dependencies": { + "react-style-singleton": "^2.2.1", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-remove-scroll-bar/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/react-remove-scroll/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/react-style-singleton": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", + "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "dependencies": { + "get-nonce": "^1.0.0", + "invariant": "^2.2.4", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/react-style-singleton/node_modules/tslib": 
{ + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.25.1.tgz", + "integrity": "sha512-tywOR+rwIt5m2ZAWSe5AIJcTat8vGlnPFAv15ycCrw33t6iFsXZ6mzHVFh2psSjxQPmI+xgzMZZizUAukBI4aQ==", + "dev": true, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=14.18.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", + "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-value": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-4.1.0.tgz", + "integrity": "sha512-zTEg4HL0RwVrqcWs3ztF+x1vkxfm0lP+MQQFPiMJTKVceBwEV0A569Ou8l9IYQG8jOZdMVI1hGsc0tmeD2o/Lw==", + "funding": [ + "https://github.com/sponsors/jonschlinkert", + "https://paypal.me/jonathanschlinkert", + 
"https://jonschlinkert.dev/sponsor" + ], + "dependencies": { + "is-plain-object": "^2.0.4", + "is-primitive": "^3.0.1" + }, + "engines": { + "node": ">=11.0" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-mod": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.3.tgz", + "integrity": "sha512-78Jv8kYJdjbvRwwijtCevYADfsI0lGzYJe4mMFdceO8l75DFFDoqBhR1jVDicDRRaX4//g1u9wKeo+ztc2h1Rw==", + "peer": true + }, + "node_modules/style-value-types": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/style-value-types/-/style-value-types-5.0.0.tgz", + "integrity": "sha512-08yq36Ikn4kx4YU6RD7jWEv27v4V+PUsOGa4n/as8Et3CuODMJQ00ENeAVXAeydX4Z2j1XHZF1K2sX4mGl18fA==", + "dependencies": { + "hey-listen": "^1.0.8", + "tslib": "^2.1.0" + } + }, + "node_modules/style-value-types/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + 
"integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toggle-selection": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", + "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==" + }, + "node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "node_modules/tsutils": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", + "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "dev": true, + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz", + "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/uc.micro": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", + "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-callback-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.0.tgz", + "integrity": "sha512-3FT9PRuRdbB9HfXhEq35u4oZkvpJ5kuYbpqhCfmiZyReuRgpnhDlbr2ZEnnuS0RrJAPn6l23xjFg9kpDM+Ms7w==", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + 
"@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-callback-ref/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/use-sidecar": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", + "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "dependencies": { + "detect-node-es": "^1.1.0", + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/use-sidecar/node_modules/tslib": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", + "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + }, + "node_modules/use-sync-external-store": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", + "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/vite": { + "version": "4.3.9", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.3.9.tgz", + "integrity": "sha512-qsTNZjO9NoJNW7KnOrgYwczm0WctJ8m/yqYAMAK9Lxt4SoySUfS5S8ia9K7JHpa3KEeMfyF8LoJ3c5NeBJy6pg==", + "dev": true, + "dependencies": { + "esbuild": "^0.17.5", + "postcss": "^8.4.23", + "rollup": "^3.21.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@types/node": ">= 14", + "less": "*", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.3", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.3.tgz", + "integrity": "sha512-SYU4z1dL0PyIMd4Vj8YOqFvHu7Hz/enbWtpfnVbJHU4Nd1YNYx8u0ennumc6h48GQNeOLxmwySmnADouT/AuZA==" + }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", + "peer": true + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/playground/package.json b/playground/package.json new file mode 100644 index 0000000000..184ec188a0 --- /dev/null +++ b/playground/package.json @@ -0,0 +1,33 @@ +{ + "name": "playground", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint src --ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview" + }, + "dependencies": { + "@tanstack/react-query": "^4.32.0", + "fast-json-patch": "^3.1.1", + "graphiql": "^3.0.4", + "graphql": "^16.7.1", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-hook-form": "^7.45.2" + }, + "devDependencies": { + "@types/react": "^18.2.15", + "@types/react-dom": "^18.2.7", + "@typescript-eslint/eslint-plugin": "^5.59.0", + "@typescript-eslint/parser": "^5.62.0", + "@vitejs/plugin-react-swc": "^3.0.0", + "eslint": "^8.45.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.3.4", + "typescript": "^5.0.2", + "vite": "^4.3.9" + } +} diff --git a/playground/playground.go b/playground/playground.go new file mode 100644 index 0000000000..6894d339b8 --- /dev/null +++ b/playground/playground.go @@ -0,0 +1,20 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +//go:build playground + +package playground + +import ( + "embed" +) + +//go:embed dist +var Dist embed.FS diff --git a/playground/src/App.tsx b/playground/src/App.tsx new file mode 100644 index 0000000000..dc00b98cbc --- /dev/null +++ b/playground/src/App.tsx @@ -0,0 +1,35 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +import { GraphiQL } from 'graphiql' +import { createGraphiQLFetcher } from '@graphiql/toolkit' +import { GraphiQLPlugin } from '@graphiql/react' +import { QueryClient, QueryClientProvider } from '@tanstack/react-query' +import { Plugin } from './components/Plugin' +import 'graphiql/graphiql.css' + +const client = new QueryClient() +const fetcher = createGraphiQLFetcher({ url: 'http://localhost:9181/api/v0/graphql' }) + +const plugin: GraphiQLPlugin = { + title: 'DefraDB', + icon: () => (DB), + content: () => (), +} + +function App() { + return ( + + + + ) +} + +export default App diff --git a/playground/src/components/Plugin.tsx b/playground/src/components/Plugin.tsx new file mode 100644 index 0000000000..e8c727fe61 --- /dev/null +++ b/playground/src/components/Plugin.tsx @@ -0,0 +1,57 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { useQuery } from '@tanstack/react-query' +import { SchemaLoadForm } from './SchemaLoadForm' +import { SchemaPatchForm } from './SchemaPatchForm' +import { listSchema } from '../lib/api' + +const defaultFieldTypes = [ + 'ID', + 'Boolean', + '[Boolean]', + '[Boolean!]', + 'Int', + '[Int]', + '[Int!]', + 'DateTime', + 'Float', + '[Float]', + '[Float!]', + 'String', + '[String]', + '[String!]', +] + +export function Plugin() { + const { data } = useQuery({ queryKey: ['schemas'], queryFn: listSchema }) + + const collections = data?.data?.collections ?? [] + const schemaFieldTypes = collections.map(col => [`${col.name}`, `[${col.name}]`]).flat() + const fieldTypes = [...defaultFieldTypes, ...schemaFieldTypes] + + return ( + + DefraDB + + + Add Schema + + + { collections?.map((schema) => + + {schema.name} Schema + + + )} + + + ) +} \ No newline at end of file diff --git a/playground/src/components/SchemaLoadForm.tsx b/playground/src/components/SchemaLoadForm.tsx new file mode 100644 index 0000000000..a1df44d87c --- /dev/null +++ b/playground/src/components/SchemaLoadForm.tsx @@ -0,0 +1,81 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +import { useState, useEffect } from 'react' +import { useForm } from 'react-hook-form' +import { useSchemaContext } from '@graphiql/react' +import { useQueryClient } from '@tanstack/react-query' +import { loadSchema, ErrorItem } from '../lib/api' + +export type FormData = { + schema: string +} + +const defaultValues: FormData = { + schema: '', +} + +export function SchemaLoadForm() { + const queryClient = useQueryClient() + const schemaContext = useSchemaContext({ nonNull: true }) + + const { formState, reset, register, handleSubmit } = useForm({ defaultValues }) + + const [errors, setErrors] = useState() + const [isLoading, setIsLoading] = useState(false) + + useEffect(() => { + if (formState.isSubmitSuccessful) reset(defaultValues) + }, [formState, reset]) + + const onSubmit = async (data: FormData) => { + setErrors(undefined) + setIsLoading(true) + + try { + const res = await loadSchema(data.schema) + if (res.errors) { + setErrors(res.errors) + } else { + schemaContext.introspect() + queryClient.invalidateQueries(['schemas']) + } + } catch(err: any) { + setErrors([{ message: err.message }]) + } finally { + setIsLoading(false) + } + } + + return ( + + {errors?.map((error, index) => + + {error.message} + + )} + + + Submit + + + ) +} \ No newline at end of file diff --git a/playground/src/components/SchemaPatchForm.tsx b/playground/src/components/SchemaPatchForm.tsx new file mode 100644 index 0000000000..d832cb8bec --- /dev/null +++ b/playground/src/components/SchemaPatchForm.tsx @@ -0,0 +1,117 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +import { useState } from 'react' +import { useForm, useFieldArray } from 'react-hook-form' +import { useSchemaContext } from '@graphiql/react' +import { useQueryClient } from '@tanstack/react-query' +import { patchSchema, Field, ErrorItem } from '../lib/api' + +export type FormData = { + name: string + fields: Field[] +} + +export type SchemaPatchFormProps = { + values?: FormData + fieldTypes: string[] +} + +export function SchemaPatchForm({ values, fieldTypes }: SchemaPatchFormProps) { + const queryClient = useQueryClient() + const schemaContext = useSchemaContext({ nonNull: true }) + + const [errors, setErrors] = useState() + const [isLoading, setIsLoading] = useState(false) + + const { control, register, handleSubmit } = useForm({ values }) + const { fields, append, remove } = useFieldArray({ control, name: 'fields', keyName: '_id' }) + + const onSubmit = async (data: FormData) => { + setErrors(undefined) + setIsLoading(true) + + try { + const res = await patchSchema(values!.name, values!.fields, data.name, data.fields) + if (res.errors) { + setErrors(res.errors) + } else { + schemaContext.introspect() + queryClient.invalidateQueries(['schemas']) + } + } catch(err: any) { + setErrors([{ message: err.message }]) + } finally { + setIsLoading(false) + } + } + + return ( + + {errors?.map((error, index) => + + {error.message} + + )} + + Fields + append({ name: '', kind: 'String', internal: false })} + > + Add + + + {fields.map((field, index) => + + + + {fieldTypes.map((value, index) => + {value} + )} + + {!field.id && + remove(index)} + disabled={isLoading || !!field.id} + > + Remove + + } + + )} + + Submit + + + ) +} \ No newline at end of file diff --git a/playground/src/index.css b/playground/src/index.css new file mode 100644 index 0000000000..78177e137f --- /dev/null +++ b/playground/src/index.css @@ -0,0 +1,103 @@ +/* Copyright 2023 Democratized Data Foundation + +Use of this software is governed by the Business Source License +included in the file licenses/BSL.txt. + +As of the Change Date specified in that file, in accordance with +the Business Source License, use of this software will be governed +by the Apache License, Version 2.0, included in the file +licenses/APL.txt. 
*/ + +html, body, #root { + height: 100%; + width: 100%; + margin: 0; +} + +#graphiql { + height: 100vh; +} + +.graphiql-defradb-plugin { + display: flex; + flex-direction: column; + gap: 16px; +} + +.graphiql-defradb-header { + margin-top: 0; + font-size: var(--font-size-h2); + font-weight: var(--font-weight-medium); +} + +.graphiql-defradb-subheader { + margin: 0; + font-size: var(--font-size-h3); + font-weight: var(--font-weight-medium); +} + +.graphiql-defradb-input-label { + margin: 0; + font-size: var(--font-size-h5); + font-weight: var(--font-weight-medium); +} + +.graphiql-defradb-error { + color: rgb(255, 93, 93); + border: 2px solid rgb(255, 93, 93); + border-radius: var(--border-radius-4); + padding: var(--px-8); +} + +.graphiql-defradb-input { + height: 30px; + width: 100%; + + font-size: var(--font-size-body); + color: var(--color-neutral); + + background-color: var(--color-primary); + border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); + border-radius: var(--border-radius-4); +} + +.graphiql-defradb-textarea { + min-height: 300px; + width: 100%; + resize: vertical; + + font-size: var(--font-size-body); + color: var(--color-neutral); + + background-color: var(--color-primary); + border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); + border-radius: var(--border-radius-4); +} + +.graphiql-defradb-load-form { + display: flex; + flex-direction: column; + gap: var(--px-8); +} + +.graphiql-defradb-form { + border-radius: var(--border-radius-4); + border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); + padding: var(--px-16); + + display: flex; + flex-direction: column; + gap: var(--px-8); +} + +.graphiql-defradb-field-header { + display: flex; + align-items: center; + justify-content: space-between; +} + +.graphiql-defradb-field { + display: flex; + align-items: center; + gap: var(--px-8); +} diff --git a/playground/src/lib/api.ts b/playground/src/lib/api.ts new file mode 100644 index 0000000000..cb8bb07e85 --- /dev/null +++ b/playground/src/lib/api.ts @@ -0,0 +1,77 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import { compare } from 'fast-json-patch' + +export type Extensions = { + status: number + httpError: string + stack?: string +} + +export type ErrorItem = { + message: string + extensions?: Extensions +} + +export type Field = { + id?: string + name: string + kind: string + internal: boolean +} + +export type Collection = { + id: string + name: string +} + +export type CollectionWithFields = Collection & { + fields: Field[] +} + +export type Response<T> = { + data: T + errors?: ErrorItem[] +} + +export type ListSchemaResponse = Response<{ + collections?: CollectionWithFields[] +}> + +export type LoadSchemaResponse = Response<{ + result?: string + collections?: Collection[] +}> + +export type PatchSchemaResponse = Response<{ + result?: string +}> + +const baseUrl = import.meta.env.DEV ?
'http://localhost:9181/api/v0' : '/api/v0' + +export async function listSchema(): Promise<ListSchemaResponse> { + return fetch(baseUrl + '/schema').then(res => res.json()) +} + +export async function loadSchema(schema: string): Promise<LoadSchemaResponse> { + return fetch(baseUrl + '/schema', { method: 'POST', body: schema }).then(res => res.json()) +} + +export async function patchSchema(nameA: string, fieldsA: Field[], nameB: string, fieldsB: Field[]): Promise<PatchSchemaResponse> { + const schemaA = { Name: nameA, Fields: fieldsA.map(field => ({ Name: field.name, Kind: field.kind })) } + const schemaB = { Name: nameB, Fields: fieldsB.map(field => ({ Name: field.name, Kind: field.kind })) } + + const collectionA = { [nameA]: { Name: nameA, Schema: schemaA } } + const collectionB = { [nameB]: { Name: nameB, Schema: schemaB } } + + const body = JSON.stringify(compare(collectionA, collectionB)) + return fetch(baseUrl + '/schema', { method: 'PATCH', body }).then(res => res.json()) +} diff --git a/playground/src/main.tsx b/playground/src/main.tsx new file mode 100644 index 0000000000..1d05ee6749 --- /dev/null +++ b/playground/src/main.tsx @@ -0,0 +1,20 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.tsx' +import './index.css' + +ReactDOM.createRoot(document.getElementById('root') as HTMLElement).render( + <React.StrictMode> + <App /> + </React.StrictMode>, +) diff --git a/playground/src/vite-env.d.ts b/playground/src/vite-env.d.ts new file mode 100644 index 0000000000..11f02fe2a0 --- /dev/null +++ b/playground/src/vite-env.d.ts @@ -0,0 +1 @@ +/// <reference types="vite/client" /> diff --git a/playground/tsconfig.json b/playground/tsconfig.json new file mode 100644 index 0000000000..a7fc6fbf23 --- /dev/null +++ b/playground/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/playground/tsconfig.node.json b/playground/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/playground/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/playground/vite.config.ts b/playground/vite.config.ts new file mode 100644 index 0000000000..861b04b356 --- /dev/null +++ b/playground/vite.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react-swc' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [react()], +}) diff --git a/request/graphql/parser.go b/request/graphql/parser.go index 2f8c910018..ddd13d9e62 100644 --- a/request/graphql/parser.go +++
b/request/graphql/parser.go @@ -103,7 +103,10 @@ func (p *parser) Parse(ast *ast.Document) (*request.Request, []error) { return query, nil } -func (p *parser) ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) { +func (p *parser) ParseSDL(ctx context.Context, schemaString string) ( + []client.CollectionDescription, + error, +) { return schema.FromString(ctx, schemaString) } diff --git a/request/graphql/parser/errors.go b/request/graphql/parser/errors.go index 6d014afcce..c629f11c19 100644 --- a/request/graphql/parser/errors.go +++ b/request/graphql/parser/errors.go @@ -23,4 +23,5 @@ var ( ErrInvalidNumberOfExplainArgs = errors.New("invalid number of arguments to an explain request") ErrUnknownExplainType = errors.New("invalid / unknown explain type") ErrUnknownGQLOperation = errors.New("unknown GraphQL operation type") + ErrInvalidFilterConditions = errors.New("invalid filter condition type, expected map") ) diff --git a/request/graphql/parser/filter.go b/request/graphql/parser/filter.go index 7afb626372..46119070b4 100644 --- a/request/graphql/parser/filter.go +++ b/request/graphql/parser/filter.go @@ -190,6 +190,67 @@ func parseVal(val ast.Value, recurseFn parseFn) (any, error) { return nil, ErrFailedToParseConditionValue } +// ParseFilterFieldsForDescription parses the fields that are defined in the SchemaDescription +// from the filter conditions“ +func ParseFilterFieldsForDescription( + conditions map[string]any, + schema client.SchemaDescription, +) ([]client.FieldDescription, error) { + return parseFilterFieldsForDescriptionMap(conditions, schema) +} + +func parseFilterFieldsForDescriptionMap( + conditions map[string]any, + schema client.SchemaDescription, +) ([]client.FieldDescription, error) { + fields := make([]client.FieldDescription, 0) + for k, v := range conditions { + switch k { + case "_or", "_and": + conds := v.([]any) + parsedFileds, err := parseFilterFieldsForDescriptionSlice(conds, schema) + if err != nil { + return nil, err + } + fields = append(fields, parsedFileds...) + case "_not": + conds := v.(map[string]any) + parsedFileds, err := parseFilterFieldsForDescriptionMap(conds, schema) + if err != nil { + return nil, err + } + fields = append(fields, parsedFileds...) + default: + f, found := schema.GetField(k) + if !found || f.IsObject() { + continue + } + fields = append(fields, f) + } + } + return fields, nil +} + +func parseFilterFieldsForDescriptionSlice( + conditions []any, + schema client.SchemaDescription, +) ([]client.FieldDescription, error) { + fields := make([]client.FieldDescription, 0) + for _, v := range conditions { + switch cond := v.(type) { + case map[string]any: + parsedFields, err := parseFilterFieldsForDescriptionMap(cond, schema) + if err != nil { + return nil, err + } + fields = append(fields, parsedFields...) + default: + return nil, ErrInvalidFilterConditions + } + } + return fields, nil +} + /* userCollection := db.getCollection("users") doc := userCollection.NewFromJSON("{ diff --git a/request/graphql/parser/request.go b/request/graphql/parser/request.go index b2680971bc..c7d7c36140 100644 --- a/request/graphql/parser/request.go +++ b/request/graphql/parser/request.go @@ -109,11 +109,11 @@ func parseDirectives(astDirectives []*ast.Directive) (request.Directives, error) if astDirective.Name.Value == request.ExplainLabel { // Explain directive found, lets parse and validate the directive. 
- parsedExplainDirctive, err := parseExplainDirective(astDirective) + parsedExplainDirective, err := parseExplainDirective(astDirective) if err != nil { return request.Directives{}, err } - explainDirective = parsedExplainDirctive + explainDirective = parsedExplainDirective } } @@ -145,6 +145,9 @@ func parseExplainDirective(astDirective *ast.Directive) (immutable.Option[reques case schemaTypes.ExplainArgExecute: return immutable.Some(request.ExecuteExplain), nil + case schemaTypes.ExplainArgDebug: + return immutable.Some(request.DebugExplain), nil + default: return immutable.None[request.ExplainType](), ErrUnknownExplainType } diff --git a/request/graphql/schema/collection.go b/request/graphql/schema/collection.go index 4058d3edc0..d48c7bb638 100644 --- a/request/graphql/schema/collection.go +++ b/request/graphql/schema/collection.go @@ -17,6 +17,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/request/graphql/schema/types" "github.com/graphql-go/graphql/language/ast" gqlp "github.com/graphql-go/graphql/language/parser" @@ -24,7 +25,10 @@ import ( ) // FromString parses a GQL SDL string into a set of collection descriptions. -func FromString(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) { +func FromString(ctx context.Context, schemaString string) ( + []client.CollectionDescription, + error, +) { source := source.NewSource(&source.Source{ Body: []byte(schemaString), }) @@ -38,12 +42,14 @@ func FromString(ctx context.Context, schemaString string) ([]client.CollectionDe return nil, err } - desc, err := fromAst(ctx, doc) - return desc, err + return fromAst(ctx, doc) } // fromAst parses a GQL AST into a set of collection descriptions. -func fromAst(ctx context.Context, doc *ast.Document) ([]client.CollectionDescription, error) { +func fromAst(ctx context.Context, doc *ast.Document) ( + []client.CollectionDescription, + error, +) { relationManager := NewRelationManager() descriptions := []client.CollectionDescription{} @@ -88,64 +94,24 @@ func fromAstDefinition( }, } + indexDescriptions := []client.IndexDescription{} for _, field := range def.Fields { - kind, err := astTypeToKind(field.Type) + tmpFieldsDescriptions, err := fieldsFromAST(field, relationManager, def) if err != nil { return client.CollectionDescription{}, err } - schema := "" - relationName := "" - relationType := client.RelationType(0) + fieldDescriptions = append(fieldDescriptions, tmpFieldsDescriptions...) - if kind == client.FieldKind_FOREIGN_OBJECT || kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { - if kind == client.FieldKind_FOREIGN_OBJECT { - schema = field.Type.(*ast.Named).Name.Value - relationType = client.Relation_Type_ONE - if _, exists := findDirective(field, "primary"); exists { - relationType |= client.Relation_Type_Primary + for _, directive := range field.Directives { + if directive.Name.Value == types.IndexDirectiveLabel { + index, err := fieldIndexFromAST(field, directive) + if err != nil { + return client.CollectionDescription{}, err } - - // An _id field is added for every 1-N relationship from this object. 
- fieldDescriptions = append(fieldDescriptions, client.FieldDescription{ - Name: fmt.Sprintf("%s_id", field.Name.Value), - Kind: client.FieldKind_DocKey, - Typ: defaultCRDTForFieldKind[client.FieldKind_DocKey], - RelationType: client.Relation_Type_INTERNAL_ID, - }) - } else if kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { - schema = field.Type.(*ast.List).Type.(*ast.Named).Name.Value - relationType = client.Relation_Type_MANY - } - - relationName, err = getRelationshipName(field, def.Name.Value, schema) - if err != nil { - return client.CollectionDescription{}, err - } - - // Register the relationship so that the relationship manager can evaluate - // relationsip properties dependent on both collections in the relationship. - _, err := relationManager.RegisterSingle( - relationName, - schema, - field.Name.Value, - relationType, - ) - if err != nil { - return client.CollectionDescription{}, err + indexDescriptions = append(indexDescriptions, index) } } - - fieldDescription := client.FieldDescription{ - Name: field.Name.Value, - Kind: kind, - Typ: defaultCRDTForFieldKind[kind], - Schema: schema, - RelationName: relationName, - RelationType: relationType, - } - - fieldDescriptions = append(fieldDescriptions, fieldDescription) } // sort the fields lexicographically @@ -159,15 +125,202 @@ func fromAstDefinition( return fieldDescriptions[i].Name < fieldDescriptions[j].Name }) + for _, directive := range def.Directives { + if directive.Name.Value == types.IndexDirectiveLabel { + index, err := indexFromAST(directive) + if err != nil { + return client.CollectionDescription{}, err + } + indexDescriptions = append(indexDescriptions, index) + } + } + return client.CollectionDescription{ Name: def.Name.Value, Schema: client.SchemaDescription{ Name: def.Name.Value, Fields: fieldDescriptions, }, + Indexes: indexDescriptions, }, nil } +// IsValidIndexName returns true if the name is a valid index name. +// Valid index names must start with a letter or underscore, and can +// contain letters, numbers, and underscores. 
+func IsValidIndexName(name string) bool { + if len(name) == 0 { + return false + } + if name[0] != '_' && (name[0] < 'a' || name[0] > 'z') && (name[0] < 'A' || name[0] > 'Z') { + return false + } + for i := 1; i < len(name); i++ { + c := name[i] + if (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') && c != '_' { + return false + } + } + return true +} + +func fieldIndexFromAST(field *ast.FieldDefinition, directive *ast.Directive) (client.IndexDescription, error) { + desc := client.IndexDescription{ + Fields: []client.IndexedFieldDescription{ + { + Name: field.Name.Value, + Direction: client.Ascending, + }, + }, + } + for _, arg := range directive.Arguments { + switch arg.Name.Value { + case types.IndexDirectivePropName: + nameVal, ok := arg.Value.(*ast.StringValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + desc.Name = nameVal.Value + if !IsValidIndexName(desc.Name) { + return client.IndexDescription{}, NewErrIndexWithInvalidName(desc.Name) + } + default: + return client.IndexDescription{}, ErrIndexWithUnknownArg + } + } + return desc, nil +} + +func indexFromAST(directive *ast.Directive) (client.IndexDescription, error) { + desc := client.IndexDescription{} + var directions *ast.ListValue + for _, arg := range directive.Arguments { + switch arg.Name.Value { + case types.IndexDirectivePropName: + nameVal, ok := arg.Value.(*ast.StringValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + desc.Name = nameVal.Value + if !IsValidIndexName(desc.Name) { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + case types.IndexDirectivePropFields: + fieldsVal, ok := arg.Value.(*ast.ListValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + for _, field := range fieldsVal.Values { + fieldVal, ok := field.(*ast.StringValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + desc.Fields = append(desc.Fields, client.IndexedFieldDescription{ + Name: fieldVal.Value, + }) + } + case types.IndexDirectivePropDirections: + var ok bool + directions, ok = arg.Value.(*ast.ListValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + default: + return client.IndexDescription{}, ErrIndexWithUnknownArg + } + } + if len(desc.Fields) == 0 { + return client.IndexDescription{}, ErrIndexMissingFields + } + if directions != nil { + if len(directions.Values) != len(desc.Fields) { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + for i := range desc.Fields { + dirVal, ok := directions.Values[i].(*ast.EnumValue) + if !ok { + return client.IndexDescription{}, ErrIndexWithInvalidArg + } + if dirVal.Value == string(client.Ascending) { + desc.Fields[i].Direction = client.Ascending + } else if dirVal.Value == string(client.Descending) { + desc.Fields[i].Direction = client.Descending + } + } + } else { + for i := range desc.Fields { + desc.Fields[i].Direction = client.Ascending + } + } + return desc, nil +} + +func fieldsFromAST(field *ast.FieldDefinition, + relationManager *RelationManager, + def *ast.ObjectDefinition, +) ([]client.FieldDescription, error) { + kind, err := astTypeToKind(field.Type) + if err != nil { + return nil, err + } + + schema := "" + relationName := "" + relationType := client.RelationType(0) + + fieldDescriptions := []client.FieldDescription{} + + if kind == client.FieldKind_FOREIGN_OBJECT || kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { + if kind == client.FieldKind_FOREIGN_OBJECT { + schema = 
field.Type.(*ast.Named).Name.Value + relationType = client.Relation_Type_ONE + if _, exists := findDirective(field, "primary"); exists { + relationType |= client.Relation_Type_Primary + } + + // An _id field is added for every 1-N relationship from this object. + fieldDescriptions = append(fieldDescriptions, client.FieldDescription{ + Name: fmt.Sprintf("%s_id", field.Name.Value), + Kind: client.FieldKind_DocKey, + Typ: defaultCRDTForFieldKind[client.FieldKind_DocKey], + RelationType: client.Relation_Type_INTERNAL_ID, + }) + } else if kind == client.FieldKind_FOREIGN_OBJECT_ARRAY { + schema = field.Type.(*ast.List).Type.(*ast.Named).Name.Value + relationType = client.Relation_Type_MANY + } + + relationName, err = getRelationshipName(field, def.Name.Value, schema) + if err != nil { + return nil, err + } + + // Register the relationship so that the relationship manager can evaluate + // relationsip properties dependent on both collections in the relationship. + _, err := relationManager.RegisterSingle( + relationName, + schema, + field.Name.Value, + relationType, + ) + if err != nil { + return nil, err + } + } + + fieldDescription := client.FieldDescription{ + Name: field.Name.Value, + Kind: kind, + Typ: defaultCRDTForFieldKind[kind], + Schema: schema, + RelationName: relationName, + RelationType: relationType, + } + + fieldDescriptions = append(fieldDescriptions, fieldDescription) + return fieldDescriptions, nil +} + func astTypeToKind(t ast.Type) (client.FieldKind, error) { const ( typeID string = "ID" @@ -288,6 +441,11 @@ func finalizeRelations(relationManager *RelationManager, descriptions []client.C return NewErrRelationMissingField(field.Schema, field.Name) } + // if not finalized then we are missing one side of the relationship + if !rel.finalized { + return NewErrRelationOneSided(field.Schema) + } + field.RelationType = rel.Kind() | fieldRelationType description.Schema.Fields[i] = field } diff --git a/request/graphql/schema/descriptions_test.go b/request/graphql/schema/descriptions_test.go index 21fecd2009..2ce5e55dc9 100644 --- a/request/graphql/schema/descriptions_test.go +++ b/request/graphql/schema/descriptions_test.go @@ -58,6 +58,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -104,6 +105,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -132,6 +134,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -187,6 +190,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -224,6 +228,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -270,6 +275,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -298,6 +304,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -353,6 +360,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -390,6 +398,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -445,6 +454,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -482,6 +492,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, @@ -537,6 
+548,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, { Name: "Author", @@ -568,6 +580,7 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, + Indexes: []client.IndexDescription{}, }, }, }, diff --git a/request/graphql/schema/errors.go b/request/graphql/schema/errors.go index 8720a04cd7..dd0e3baa63 100644 --- a/request/graphql/schema/errors.go +++ b/request/graphql/schema/errors.go @@ -22,6 +22,11 @@ const ( errTypeNotFound string = "no type found for given name" errRelationNotFound string = "no relation found" errNonNullForTypeNotSupported string = "NonNull variants for type are not supported" + errIndexMissingFields string = "index missing fields" + errIndexUnknownArgument string = "index with unknown argument" + errIndexInvalidArgument string = "index with invalid argument" + errIndexInvalidName string = "index with invalid name" + errRelationOneSided string = "relation must be defined on both schemas" ) var ( @@ -41,6 +46,10 @@ var ( // NonNull is the literal name of the GQL type, so we have to disable the linter //nolint:revive ErrNonNullNotSupported = errors.New("NonNull fields are not currently supported") + ErrIndexMissingFields = errors.New(errIndexMissingFields) + ErrIndexWithUnknownArg = errors.New(errIndexUnknownArgument) + ErrIndexWithInvalidArg = errors.New(errIndexInvalidArgument) + ErrRelationOneSided = errors.New(errRelationOneSided) ) func NewErrDuplicateField(objectName, fieldName string) error { @@ -51,6 +60,10 @@ func NewErrDuplicateField(objectName, fieldName string) error { ) } +func NewErrIndexWithInvalidName(name string) error { + return errors.New(errIndexInvalidName, errors.NewKV("Name", name)) +} + func NewErrFieldMissingRelation(objectName, fieldName string, objectType string) error { return errors.New( errFieldMissingRelation, @@ -68,6 +81,10 @@ func NewErrRelationMissingField(objectName, fieldName string) error { ) } +func NewErrRelationOneSided(typeName string) error { + return errors.New(errRelationOneSided, errors.NewKV("Type", typeName)) +} + func NewErrAggregateTargetNotFound(objectName, target string) error { return errors.New( errAggregateTargetNotFound, diff --git a/request/graphql/schema/generate.go b/request/graphql/schema/generate.go index 1b64f10dcd..e30693b3de 100644 --- a/request/graphql/schema/generate.go +++ b/request/graphql/schema/generate.go @@ -1116,20 +1116,14 @@ func (g *Generator) genTypeOrderArgInput(obj *gql.Object) *gql.InputObject { continue } typeMap := g.manager.schema.TypeMap() + configType, isOrderable := typeMap[genTypeName(field.Type, "OrderArg")] if gql.IsLeafType(field.Type) { // only Scalars, and enums fields[field.Name] = &gql.InputObjectFieldConfig{ Type: typeMap["Ordering"], } - } else { // sub objects - configType, isOrderable := typeMap[genTypeName(field.Type, "OrderArg")] - if !isOrderable { - fields[field.Name] = &gql.InputObjectFieldConfig{ - Type: &gql.InputObjectField{}, - } - } else { - fields[field.Name] = &gql.InputObjectFieldConfig{ - Type: configType, - } + } else if isOrderable { // sub objects + fields[field.Name] = &gql.InputObjectFieldConfig{ + Type: configType, } } } diff --git a/request/graphql/schema/index_test.go b/request/graphql/schema/index_test.go new file mode 100644 index 0000000000..379b84647d --- /dev/null +++ b/request/graphql/schema/index_test.go @@ -0,0 +1,303 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schema + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/sourcenetwork/defradb/client" +) + +func TestStructIndex(t *testing.T) { + cases := []indexTestCase{ + { + description: "Index with a single field", + sdl: `type user @index(fields: ["name"]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Name: "", + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + }, + }, + }, + }, + { + description: "Index with a name", + sdl: `type user @index(name: "userIndex", fields: ["name"]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Name: "userIndex", + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + }, + }, + }, + }, + { + description: "Index with explicit ascending field", + sdl: `type user @index(fields: ["name"], directions: [ASC]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}}, + }, + }, + }, + { + description: "Index with descending field", + sdl: `type user @index(fields: ["name"], directions: [DESC]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Descending}}, + }, + }, + }, + { + description: "Index with 2 fields", + sdl: `type user @index(fields: ["name", "age"]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + {Name: "age", Direction: client.Ascending}, + }, + }, + }, + }, + { + description: "Index with 2 fields and 2 directions", + sdl: `type user @index(fields: ["name", "age"], directions: [ASC, DESC]) {}`, + targetDescriptions: []client.IndexDescription{ + { + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + {Name: "age", Direction: client.Descending}, + }, + }, + }, + }, + } + + for _, test := range cases { + parseIndexAndTest(t, test) + } +} + +func TestInvalidStructIndex(t *testing.T) { + cases := []invalidIndexTestCase{ + { + description: "missing 'fields' argument", + sdl: `type user @index(name: "userIndex") {}`, + expectedErr: errIndexMissingFields, + }, + { + description: "unknown argument", + sdl: `type user @index(unknown: "something", fields: ["name"]) {}`, + expectedErr: errIndexUnknownArgument, + }, + { + description: "invalid index name type", + sdl: `type user @index(name: 1, fields: ["name"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "index name starts with a number", + sdl: `type user @index(name: "1_user_name", fields: ["name"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "index with empty name", + sdl: `type user @index(name: "", fields: ["name"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "index name with spaces", + sdl: `type user @index(name: "user name", fields: ["name"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "index name with special symbols", + sdl: `type user @index(name: "user!name", fields: ["name"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "invalid 'fields' value type (not a list)", + sdl: `type user @index(fields: 
"name") {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "invalid 'fields' value type (not a string list)", + sdl: `type user @index(fields: [1]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "invalid 'directions' value type (not a list)", + sdl: `type user @index(fields: ["name"], directions: "ASC") {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "invalid 'directions' value type (not a string list)", + sdl: `type user @index(fields: ["name"], directions: [1]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "invalid 'directions' value type (invalid element value)", + sdl: `type user @index(fields: ["name"], directions: ["direction"]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "fewer directions than fields", + sdl: `type user @index(fields: ["name", "age"], directions: [ASC]) {}`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "more directions than fields", + sdl: `type user @index(fields: ["name"], directions: [ASC, DESC]) {}`, + expectedErr: errIndexInvalidArgument, + }, + } + + for _, test := range cases { + parseInvalidIndexAndTest(t, test) + } +} + +func TestFieldIndex(t *testing.T) { + cases := []indexTestCase{ + { + description: "field index", + sdl: `type user { + name: String @index + }`, + targetDescriptions: []client.IndexDescription{ + { + Name: "", + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + }, + }, + }, + }, + { + description: "field index with name", + sdl: `type user { + name: String @index(name: "nameIndex") + }`, + targetDescriptions: []client.IndexDescription{ + { + Name: "nameIndex", + Fields: []client.IndexedFieldDescription{ + {Name: "name", Direction: client.Ascending}, + }, + }, + }, + }, + } + + for _, test := range cases { + parseIndexAndTest(t, test) + } +} + +func TestInvalidFieldIndex(t *testing.T) { + cases := []invalidIndexTestCase{ + { + description: "forbidden 'field' argument", + sdl: `type user { + name: String @index(field: "name") + }`, + expectedErr: errIndexUnknownArgument, + }, + { + description: "forbidden 'direction' argument", + sdl: `type user { + name: String @index(direction: ASC) + }`, + expectedErr: errIndexUnknownArgument, + }, + { + description: "invalid field index name type", + sdl: `type user { + name: String @index(name: 1) + }`, + expectedErr: errIndexInvalidArgument, + }, + { + description: "field index name starts with a number", + sdl: `type user { + name: String @index(name: "1_user_name") + }`, + expectedErr: errIndexInvalidName, + }, + { + description: "field index with empty name", + sdl: `type user { + name: String @index(name: "") + }`, + expectedErr: errIndexInvalidName, + }, + { + description: "field index name with spaces", + sdl: `type user { + name: String @index(name: "user name") + }`, + expectedErr: errIndexInvalidName, + }, + { + description: "field index name with special symbols", + sdl: `type user { + name: String @index(name: "user!name") + }`, + expectedErr: errIndexInvalidName, + }, + } + + for _, test := range cases { + parseInvalidIndexAndTest(t, test) + } +} + +func parseIndexAndTest(t *testing.T, testCase indexTestCase) { + ctx := context.Background() + + cols, err := FromString(ctx, testCase.sdl) + assert.NoError(t, err, testCase.description) + assert.Equal(t, len(cols), 1, testCase.description) + assert.Equal(t, len(cols[0].Indexes), len(testCase.targetDescriptions), testCase.description) + + for i, d := range cols[0].Indexes { + 
assert.Equal(t, testCase.targetDescriptions[i], d, testCase.description) + } +} + +func parseInvalidIndexAndTest(t *testing.T, testCase invalidIndexTestCase) { + ctx := context.Background() + + _, err := FromString(ctx, testCase.sdl) + assert.ErrorContains(t, err, testCase.expectedErr, testCase.description) +} + +type indexTestCase struct { + description string + sdl string + targetDescriptions []client.IndexDescription +} + +type invalidIndexTestCase struct { + description string + sdl string + expectedErr string +} diff --git a/request/graphql/schema/manager.go b/request/graphql/schema/manager.go index 045a4e33ae..76a5441d70 100644 --- a/request/graphql/schema/manager.go +++ b/request/graphql/schema/manager.go @@ -112,6 +112,8 @@ func defaultMutationType() *gql.Object { func defaultDirectivesType() []*gql.Directive { return []*gql.Directive{ schemaTypes.ExplainDirective, + schemaTypes.IndexDirective, + schemaTypes.IndexFieldDirective, } } diff --git a/request/graphql/schema/types/types.go b/request/graphql/schema/types/types.go index ae01b30255..75f91fb2c5 100644 --- a/request/graphql/schema/types/types.go +++ b/request/graphql/schema/types/types.go @@ -22,6 +22,12 @@ const ( ExplainArgNameType string = "type" ExplainArgSimple string = "simple" ExplainArgExecute string = "execute" + ExplainArgDebug string = "debug" + + IndexDirectiveLabel = "index" + IndexDirectivePropName = "name" + IndexDirectivePropFields = "fields" + IndexDirectivePropDirections = "directions" ) var ( @@ -46,12 +52,17 @@ var ( Values: gql.EnumValueConfigMap{ ExplainArgSimple: &gql.EnumValueConfig{ Value: ExplainArgSimple, - Description: "Simple explaination - dump of the plan graph.", + Description: "Simple explanation - dump of the plan graph.", }, ExplainArgExecute: &gql.EnumValueConfig{ Value: ExplainArgExecute, - Description: "Deeper explaination - insights gathered by executing the plan graph.", + Description: "Deeper explanation - insights gathered by executing the plan graph.", + }, + + ExplainArgDebug: &gql.EnumValueConfig{ + Value: ExplainArgDebug, + Description: "Like simple explain, but more verbose nodes (no attributes).", }, }, }) @@ -73,6 +84,38 @@ var ( }, }) + IndexDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ + Name: IndexDirectiveLabel, + Description: "@index is a directive that can be used to create an index on a type.", + Args: gql.FieldConfigArgument{ + IndexDirectivePropName: &gql.ArgumentConfig{ + Type: gql.String, + }, + IndexDirectivePropFields: &gql.ArgumentConfig{ + Type: gql.NewList(gql.String), + }, + IndexDirectivePropDirections: &gql.ArgumentConfig{ + Type: gql.NewList(OrderingEnum), + }, + }, + Locations: []string{ + gql.DirectiveLocationObject, + }, + }) + + IndexFieldDirective *gql.Directive = gql.NewDirective(gql.DirectiveConfig{ + Name: IndexDirectiveLabel, + Description: "@index is a directive that can be used to create an index on a field.", + Args: gql.FieldConfigArgument{ + IndexDirectivePropName: &gql.ArgumentConfig{ + Type: gql.String, + }, + }, + Locations: []string{ + gql.DirectiveLocationField, + }, + }) + // PrimaryDirective @primary is used to indicate the primary // side of a one-to-one relationship. 
PrimaryDirective = gql.NewDirective(gql.DirectiveConfig{ diff --git a/tests/README.md new file mode 100644 index 0000000000..a17fbcaf08 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,41 @@ +# Tests + +This directory contains two types of tests: benchmark tests (located in the bench directory) and integration tests (located in the integration directory). +In addition to these, unit tests are also distributed among the other directories of the source code. + +## Test Types + +### Benchmark Tests + +The bench directory contains benchmark tests that are used to measure and monitor the performance of the database. + +### Integration Tests + +The integration directory contains integration tests that ensure different components of the system work together correctly. + +### Unit Tests + +Unit tests are spread throughout the source code and are located in the same directories as the code they are testing. +These tests focus on small, isolated parts of the code to ensure each part is working as expected. + +## Mocks + +For unit tests, we sometimes use mocks. Mocks are automatically generated from Go interfaces using the mockery tool. +This helps to isolate the code being tested and provide more focused and reliable tests. + +To regenerate the mocks, run `make mock`. + +The mocks are typically generated into a separate mocks directory. + +You can manually generate a mock for a specific interface using the following command: + +```shell +mockery --name <InterfaceName> --with-expecter +``` + +Here, `--name` specifies the name of the interface for which to generate the mock. + +The `--with-expecter` option adds a helper struct for each method, making the mock strongly typed. +This leads to more generated code, but it removes the need to pass strings around and increases type safety. + +For more information on mockery, please refer to the [official repository](https://github.com/vektra/mockery).
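As a rough sketch of what the generated expecter API looks like in practice, the test below exercises a hypothetical `Store` interface (with a single `Get(key string) (string, error)` method) through a mock assumed to have been generated into a `mocks` package via `mockery --name Store --with-expecter`. The interface, import path, and constructor name here are purely illustrative; the exact generated names depend on the mockery version and configuration.

```go
package store_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	// Hypothetical location of the mockery-generated mock for the Store interface.
	"example.com/project/store/mocks"
)

func TestFetchGreeting_UsesStore(t *testing.T) {
	// NewStore wires the mock to the test, so unmet expectations fail the test on cleanup.
	storeMock := mocks.NewStore(t)

	// With --with-expecter, expectations are set through typed helper methods
	// instead of passing the method name around as a string.
	storeMock.EXPECT().Get("greeting").Return("hello", nil)

	value, err := storeMock.Get("greeting")
	assert.NoError(t, err)
	assert.Equal(t, "hello", value)
}
```

The trade-off mentioned above is visible in this sketch: the expecter output is larger than a plain string-based mock, but the mocked method is referenced as a real method rather than a string, and the values passed to `Return` are type-checked at compile time.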
diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index d534a3c261..712423158c 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -13,9 +13,7 @@ package bench import ( "context" "fmt" - "hash/fnv" "math" - "math/rand" "os" "sync" "testing" @@ -43,26 +41,12 @@ var ( func init() { logging.SetConfig(logging.Config{Level: logging.NewLogLevelOption(logging.Error)}) - // create a consistent seed value for the random package - // so we don't have random fluctuations between runs - // (specifically thinking about the fixture generation stuff) - seed := hashToInt64("https://xkcd.com/221/") - rand.Seed(seed) - // assign if not empty if s := os.Getenv(storageEnvName); s != "" { storage = s } } -// hashToInt64 uses the FNV-1 hash to int -// algorithm -func hashToInt64(s string) int64 { - h := fnv.New64a() - h.Write([]byte(s)) - return int64(h.Sum64()) -} - func SetupCollections( b *testing.B, ctx context.Context, diff --git a/tests/bench/storage/utils.go b/tests/bench/storage/utils.go index e7b4e0d6d6..5c550f25db 100644 --- a/tests/bench/storage/utils.go +++ b/tests/bench/storage/utils.go @@ -12,7 +12,8 @@ package storage import ( "context" - "math/rand" + "crypto/rand" + mathRand "math/rand" "sort" "testing" @@ -327,5 +328,5 @@ func getSampledIndex(populationSize int, sampleSize int, i int) int { } pointsPerInterval := populationSize / sampleSize - return (i * pointsPerInterval) + rand.Intn(pointsPerInterval) + return (i * pointsPerInterval) + mathRand.Intn(pointsPerInterval) } diff --git a/tests/integration/backup/one_to_many/export_test.go b/tests/integration/backup/one_to_many/export_test.go new file mode 100644 index 0000000000..cbba06162b --- /dev/null +++ b/tests/integration/backup/one_to_many/export_test.go @@ -0,0 +1,99 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupExport_JustUserCollection_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Collections: []string{"User"}, + }, + ExpectedContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_AllCollectionsMultipleDocsAndMultipleDocUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/backup/one_to_many/import_test.go b/tests/integration/backup/one_to_many/import_test.go new file mode 100644 index 0000000000..5cbae18416 --- /dev/null +++ b/tests/integration/backup/one_to_many/import_test.go @@ -0,0 +1,163 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed 
by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "User":[ + {"age":30,"name":"John"}, + {"age":31,"name":"Smith"}, + {"age":32,"name":"Bob"} + ], + "Book":[ + {"name":"John and the sourcerers' stone"}, + {"name":"Game of chains"} + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Smith", + "age": uint64(31), + }, + { + "name": "Bob", + "age": uint64(32), + }, + { + "name": "John", + "age": uint64(30), + }, + }, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + }, + { + "name": "Game of chains", + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "Book":[ + { + "_key":"bae-4399f189-138d-5d49-9e25-82e78463677b", + "_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff", + "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "name":"Game of chains" + }, + { + "_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", + "_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", + "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "name":"John and the sourcerers' stone" + } + ], + "User":[ + { + "_key":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "age":31, + "name":"Bob" + }, + { + "_key":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "age":31, + "name":"John" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Bob", + "age": uint64(31), + }, + { + "name": "John", + "age": uint64(31), + }, + }, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + _key + } + } + }`, + Results: []map[string]any{ + { + "name": "Game of chains", + "author": map[string]any{ + "_key": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + }, + }, + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "_key": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/backup/one_to_many/utils.go b/tests/integration/backup/one_to_many/utils.go new file mode 100644 index 0000000000..3f21a6ae80 --- /dev/null +++ b/tests/integration/backup/one_to_many/utils.go @@ -0,0 +1,47 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +var schemas = (` + type User { + name: String + age: Int + books: [Book] + } + + type Book { + name: String + author: User + } +`) + +func executeTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: schemas, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/backup/one_to_one/export_test.go b/tests/integration/backup/one_to_one/export_test.go new file mode 100644 index 0000000000..f6d6123d52 --- /dev/null +++ b/tests/integration/backup/one_to_one/export_test.go @@ -0,0 +1,243 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupExport_JustUserCollection_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Collections: []string{"User"}, + }, + ExpectedContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +// note: This test should fail at the second book creation since the relationship is 1-to-1 and this +// effectively creates a 1-to-many relationship +func TestBackupExport_AllCollectionsMultipleDocsAndMultipleDocUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + 
testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_DoubleReletionship_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + book: Book @relation(name: "written_books") + favouriteBook: Book @relation(name: "favourite_books") + } + type Book { + name: String + author: User @relation(name: "written_books") + favourite: User @relation(name: "favourite_books") + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519", "favourite": "bae-0648f44e-74e8-593b-a662-3310ec278927"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestBackupExport_DoubleReletionshipWithUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + book: Book @relation(name: "written_books") + favouriteBook: Book @relation(name: "favourite_books") + } + type Book { + name: String + author: User @relation(name: "written_books") + favourite: User @relation(name: "favourite_books") + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519", "favourite": "bae-0648f44e-74e8-593b-a662-3310ec278927"}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "Game of chains"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + 
ExpectedContent: `{"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"},{"_key":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_newKey":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// note: This test should fail at the second book creation since the relationship is 1-to-1 and this +// effectively creates a 1-to-many relationship +func TestBackupExport_DoubleReletionshipWithUpdateAndDoublylinked_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + book: Book @relation(name: "written_books") + favouriteBook: Book @relation(name: "favourite_books") + } + type Book { + name: String + author: User @relation(name: "written_books") + favourite: User @relation(name: "favourite_books") + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519", "favourite": "bae-0648f44e-74e8-593b-a662-3310ec278927"}`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{"name": "Game of chains"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31, "book_id": "bae-da7f2d88-05c4-528a-846a-0d18ab26603b"}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"},{"_key":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/backup/one_to_one/import_test.go b/tests/integration/backup/one_to_one/import_test.go new file mode 100644 index 0000000000..9abe59f06a --- /dev/null +++ b/tests/integration/backup/one_to_one/import_test.go @@ -0,0 +1,294 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "User":[ + {"age":30,"name":"John"}, + {"age":31,"name":"Smith"}, + {"age":32,"name":"Bob"} + ], + "Book":[ + {"name":"John and the sourcerers' stone"}, + {"name":"Game of chains"} + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Smith", + "age": uint64(31), + }, + { + "name": "Bob", + "age": uint64(32), + }, + { + "name": "John", + "age": uint64(30), + }, + }, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + }, + { + "name": "Game of chains", + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "Book":[ + { + "_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", + "_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", + "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "name":"John and the sourcerers' stone" + } + ], + "User":[ + { + "_key":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "age":31, + "name":"Bob" + }, + { + "_key":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "age":31, + "name":"John" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Bob", + "age": uint64(31), + }, + { + "name": "John", + "age": uint64(31), + }, + }, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + _key + } + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "_key": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +// note: This test should fail at the second book creation since the relationship is 1-to-1 and this +// effectively creates a 1-to-many relationship: +// https://github.com/sourcenetwork/defradb/issues/1646 +func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndMultipleUpdatedDocs_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "Book":[ + { + "_key":"bae-4399f189-138d-5d49-9e25-82e78463677b", + "_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff", + "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "name":"Game of chains" + }, + { + "_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da", + "_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5", + "author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "name":"John and the sourcerers' stone" + } + ], + "User":[ + { + "_key":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927", + "age":31, + "name":"Bob" + }, + { + "_key":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162", + "age":31, + "name":"John" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Bob", + 
"age": uint64(31), + }, + { + "name": "John", + "age": uint64(31), + }, + }, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + _key + } + } + }`, + Results: []map[string]any{ + { + "name": "Game of chains", + "author": map[string]any{ + "_key": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + }, + }, + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "_key": "bae-807ea028-6c13-5f86-a72b-46e8b715a162", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_DoubleRelationshipWithUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + book: Book @relation(name: "written_books") + favouriteBook: Book @relation(name: "favourite_books") + } + type Book { + name: String + author: User @relation(name: "written_books") + favourite: User @relation(name: "favourite_books") + } + `, + }, + testUtils.BackupImport{ + ImportContent: `{"Book":[{"_key":"bae-236c14bd-4621-5d43-bc03-4442f3b8719e","_newKey":"bae-6dbb3738-d3db-5121-acee-6fbdd97ff7a8","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"},{"_key":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_newKey":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + name + favouriteBook { + name + } + } + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "name": "John", + "favouriteBook": map[string]any{ + "name": "John and the sourcerers' stone", + }, + }, + }, + { + "name": "Game of chains", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/backup/one_to_one/utils.go b/tests/integration/backup/one_to_one/utils.go new file mode 100644 index 0000000000..0dfe540cb8 --- /dev/null +++ b/tests/integration/backup/one_to_one/utils.go @@ -0,0 +1,47 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +var schemas = (` + type User { + name: String + age: Int + book: Book + } + + type Book { + name: String + author: User @primary + } +`) + +func executeTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: schemas, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/backup/self_reference/export_test.go b/tests/integration/backup/self_reference/export_test.go new file mode 100644 index 0000000000..21eb5e95d3 --- /dev/null +++ b/tests/integration/backup/self_reference/export_test.go @@ -0,0 +1,121 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupExport_Simple_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Collections: []string{"User"}, + }, + ExpectedContent: `{"User":[{"_key":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_newKey":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","age":31,"boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519","name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_MultipleDocsAndDocUpdate_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"User":[{"_key":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_newKey":"bae-067fd15e-32a1-5681-8f41-c423f563e21b","age":31,"boss_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_MultipleDocsAndDocUpdateWithSelfReference_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"boss_id": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.BackupExport{ + ExpectedContent: 
`{"User":[{"_key":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_newKey":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","age":31,"boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519","name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519","name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_MultipleDocsAndMultipleDocUpdateWithSelfReference_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "Bob", "age": 31, "boss": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"boss_id": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`, + }, + testUtils.UpdateDoc{ + CollectionID: 0, + DocID: 0, + Doc: `{"age": 31}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"User":[{"_key":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d","_newKey":"bae-067fd15e-32a1-5681-8f41-c423f563e21b","age":31,"boss_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"boss_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/backup/self_reference/import_test.go b/tests/integration/backup/self_reference/import_test.go new file mode 100644 index 0000000000..71c44361a1 --- /dev/null +++ b/tests/integration/backup/self_reference/import_test.go @@ -0,0 +1,372 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupSelfRefImport_Simple_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{ + "User":[ + { + "_key":"bae-790e7e49-f2e3-5ad6-83d9-5dfb6d8ba81d", + "age":31, + "boss_id":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "name":"Bob" + }, + { + "_key":"bae-e933420a-988a-56f8-8952-6c245aebd519", + "age":30, + "name":"John" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + boss { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Bob", + "boss": map[string]any{ + "name": "John", + }, + }, + { + "name": "John", + "boss": nil, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupSelfRefImport_SelfRef_NoError(t *testing.T) { + expectedExportData := `{` + + `"User":[` + + `{` + + `"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + + `"_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + + `"age":31,` + + `"boss_id":"bae-0648f44e-74e8-593b-a662-3310ec278927",` + + `"name":"Bob"` + + `}` + + `]` + + `}` + test := testUtils.TestCase{ + Actions: []any{ + // Configure 2 nodes for this test, we will export from the first + // and import to the second. 
+ testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: schemas, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "name": "Bob", + "age": 31 + }`, + }, + testUtils.UpdateDoc{ + NodeID: immutable.Some(0), + Doc: `{ + "boss_id": "bae-0648f44e-74e8-593b-a662-3310ec278927" + }`, + }, + testUtils.BackupExport{ + NodeID: immutable.Some(0), + ExpectedContent: expectedExportData, + }, + testUtils.BackupImport{ + NodeID: immutable.Some(1), + ImportContent: expectedExportData, + }, + testUtils.Request{ + NodeID: immutable.Some(1), + Request: ` + query { + User { + name + boss { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Bob", + "boss": map[string]any{ + "name": "Bob", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestBackupSelfRefImport_PrimaryRelationWithSecondCollection_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + book: Book @relation(name: "author_book") + reviewed: Book @relation(name: "reviewedBy_reviewed") + } + type Book { + name: String + author: Author @primary @relation(name: "author_book") + reviewedBy: Author @primary @relation(name: "reviewedBy_reviewed") + } + `, + }, + testUtils.BackupImport{ + ImportContent: `{ + "Author":[ + { + "name":"John" + } + ], + "Book":[ + { + "name":"John and the sourcerers' stone", + "author":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", + "reviewedBy":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + name + reviewed { + name + } + } + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "name": "John", + "reviewed": map[string]any{ + "name": "John and the sourcerers' stone", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestBackupSelfRefImport_PrimaryRelationWithSecondCollectionWrongOrder_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + book: Book @relation(name: "author_book") + reviewed: Book @relation(name: "reviewedBy_reviewed") + } + type Book { + name: String + author: Author @primary @relation(name: "author_book") + reviewedBy: Author @primary @relation(name: "reviewedBy_reviewed") + } + `, + }, + testUtils.BackupImport{ + ImportContent: `{ + "Book":[ + { + "name":"John and the sourcerers' stone", + "author":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad", + "reviewedBy":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + } + ], + "Author":[ + { + "name":"John" + } + ] + }`, + }, + testUtils.Request{ + Request: ` + query { + Book { + name + author { + name + reviewed { + name + } + } + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "name": "John", + "reviewed": map[string]any{ + "name": "John and the sourcerers' stone", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This test documents undesirable behaviour, as the documents are not linked. 
+// https://github.com/sourcenetwork/defradb/issues/1704 +func TestBackupSelfRefImport_SplitPrimaryRelationWithSecondCollection_NoError(t *testing.T) { + expectedExportData := `{` + + `"Author":[` + + `{` + + `"_key":"bae-d760e445-22ef-5956-9947-26de226891f6",` + + `"_newKey":"bae-e3a6ff01-33ff-55f4-88f9-d13db26274c8",` + + `"book_id":"bae-c821a0a9-7afc-583b-accb-dc99a09c1ff8",` + + `"name":"John"` + + `}` + + `],` + + `"Book":[` + + `{` + + `"_key":"bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4",` + + `"_newKey":"bae-c821a0a9-7afc-583b-accb-dc99a09c1ff8",` + + `"name":"John and the sourcerers' stone",` + + `"reviewedBy_id":"bae-e3a6ff01-33ff-55f4-88f9-d13db26274c8"` + + `}` + + `]` + + `}` + + test := testUtils.TestCase{ + Actions: []any{ + // Configure 2 nodes for this test, we will export from the first + // and import to the second. + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Author { + name: String + book: Book @primary @relation(name: "author_book") + reviewed: Book @relation(name: "reviewedBy_reviewed") + } + type Book { + name: String + author: Author @relation(name: "author_book") + reviewedBy: Author @primary @relation(name: "reviewedBy_reviewed") + } + `, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + CollectionID: 1, + // bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4 + Doc: `{ + "name": "John and the sourcerers' stone" + }`, + }, + testUtils.CreateDoc{ + NodeID: immutable.Some(0), + CollectionID: 0, + Doc: `{ + "name": "John", + "book": "bae-4059cb15-2b30-5049-b0df-64cc7ad9b5e4" + }`, + }, + testUtils.UpdateDoc{ + NodeID: immutable.Some(0), + CollectionID: 1, + DocID: 0, + Doc: `{ + "reviewedBy_id": "bae-d760e445-22ef-5956-9947-26de226891f6" + }`, + }, + /* + This fails due to the linked ticket. + https://github.com/sourcenetwork/defradb/issues/1704 + testUtils.BackupExport{ + NodeID: immutable.Some(0), + ExpectedContent: expectedExportData, + }, + */ + testUtils.BackupImport{ + NodeID: immutable.Some(1), + ImportContent: expectedExportData, + }, + testUtils.Request{ + NodeID: immutable.Some(1), + Request: ` + query { + Book { + name + author { + name + reviewed { + name + } + } + } + }`, + Results: []map[string]any{ + { + "name": "John and the sourcerers' stone", + "author": map[string]any{ + "name": "John", + "reviewed": nil, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/backup/self_reference/utils.go b/tests/integration/backup/self_reference/utils.go new file mode 100644 index 0000000000..da720e22d4 --- /dev/null +++ b/tests/integration/backup/self_reference/utils.go @@ -0,0 +1,43 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +var schemas = (` + type User { + name: String + age: Int + boss: User @primary @relation(name: "boss_minion") + minion: User @relation(name: "boss_minion") + } +`) + +func executeTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: schemas, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/backup/simple/export_test.go b/tests/integration/backup/simple/export_test.go new file mode 100644 index 0000000000..08c05b044a --- /dev/null +++ b/tests/integration/backup/simple/export_test.go @@ -0,0 +1,107 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupExport_Simple_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_Empty_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{}`, + }, + testUtils.BackupExport{ + ExpectedContent: `{"User":[{"_key":"bae-524bfa06-849c-5daf-b6df-05c2da80844d","_newKey":"bae-524bfa06-849c-5daf-b6df-05c2da80844d"}]}`, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_WithInvalidFilePath_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Filepath: t.TempDir() + "/some/test.json", + }, + ExpectedError: "no such file or directory", + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_WithInvalidCollection_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Collections: []string{"Invalid"}, + }, + ExpectedError: "failed to get collection: datastore: key not found. 
Name: Invalid", + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupExport_JustUserCollection_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupExport{ + Config: client.BackupConfig{ + Collections: []string{"User"}, + }, + ExpectedContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/backup/simple/import_test.go b/tests/integration/backup/simple/import_test.go new file mode 100644 index 0000000000..c6e98a29e8 --- /dev/null +++ b/tests/integration/backup/simple/import_test.go @@ -0,0 +1,206 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestBackupImport_Simple_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "John", + "age": uint64(30), + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithInvalidFilePath_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + Filepath: t.TempDir() + "/some/test.json", + ExpectedError: "no such file or directory", + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithInvalidCollection_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"Invalid":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedError: "failed to get collection: datastore: key not found. 
Name: Invalid", + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithDocAlreadyExists_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{"name": "John", "age": 30}`, + }, + testUtils.BackupImport{ + ImportContent: `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, + ExpectedError: "a document with the given dockey already exists", + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithNoKeys_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"User":[{"age":30,"name":"John"}]}`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "John", + "age": uint64(30), + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithMultipleNoKeys_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"User":[ + {"age":30,"name":"John"}, + {"age":31,"name":"Smith"}, + {"age":32,"name":"Bob"} + ]}`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Smith", + "age": uint64(31), + }, + { + "name": "Bob", + "age": uint64(32), + }, + { + "name": "John", + "age": uint64(30), + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_EmptyObject_NoError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"User":[{}]}`, + }, + testUtils.Request{ + Request: ` + query { + User { + name + } + }`, + Results: []map[string]any{ + { + "name": nil, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestBackupImport_WithMultipleNoKeysAndInvalidField_Errors(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.BackupImport{ + ImportContent: `{"User":[ + {"age":30,"name":"John"}, + {"INVALID":31,"name":"Smith"}, + {"age":32,"name":"Bob"} + ]}`, + ExpectedError: "The given field does not exist. Name: INVALID", + }, + testUtils.Request{ + Request: ` + query { + User { + name + age + } + }`, + // No documents should have been commited + Results: []map[string]any{}, + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/backup/simple/utils.go b/tests/integration/backup/simple/utils.go new file mode 100644 index 0000000000..67b18580af --- /dev/null +++ b/tests/integration/backup/simple/utils.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package backup + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +var schemas = (` + type User { + name: String + age: Int + } +`) + +func executeTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: schemas, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/change_detector.go b/tests/integration/change_detector.go index f7d6863cdc..15f17fb16b 100644 --- a/tests/integration/change_detector.go +++ b/tests/integration/change_detector.go @@ -89,8 +89,8 @@ func detectDbChangesInit(repository string, targetBranch string) { latestTargetCommitHash := getLatestCommit(repository, targetBranch) detectDbChangesCodeDir = path.Join(changeDetectorTempDir, "code", latestTargetCommitHash) - rand.Seed(time.Now().Unix()) - randNumber := rand.Int() + r := rand.New(rand.NewSource(time.Now().Unix())) + randNumber := r.Int() dbsDir := path.Join(changeDetectorTempDir, "dbs", fmt.Sprint(randNumber)) testPackagePath, isIntegrationTest := getTestPackagePath() diff --git a/tests/integration/cli/client_backup_export_test.go b/tests/integration/cli/client_backup_export_test.go new file mode 100644 index 0000000000..62f2677c7b --- /dev/null +++ b/tests/integration/cli/client_backup_export_test.go @@ -0,0 +1,118 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package clitest + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func createUser(t *testing.T, conf DefraNodeConfig) { + _, _ = runDefraCommand(t, conf, []string{ + "client", "query", `mutation { create_User(data: "{\"name\": \"John\"}") { _key } }`, + }) +} + +func TestBackup_IfNoArgs_ShowUsage(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stdout, _ := runDefraCommand(t, conf, []string{"client", "backup"}) + assertContainsSubstring(t, stdout, "Usage:") +} + +func TestBackupExport_ForAllCollections_ShouldExport(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/test.json" + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "export", filepath, + }) + stopDefra() + + assertContainsSubstring(t, stdout, "success") + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + require.Equal( + t, + `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, + string(b), + ) +} + +func TestBackupExport_ForUserCollection_ShouldExport(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/test.json" + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "export", filepath, "--collections", "User", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "success") + + b, err := os.ReadFile(filepath) + require.NoError(t, err) + require.Equal( + t, + `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, + string(b), + ) +} + +func TestBackupExport_ForInvalidCollection_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/test.json" + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "export", filepath, "--collections", "Invalid", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "collection does not exist") +} + +func TestBackupExport_InvalidFilePath_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/some/test.json" + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "export", filepath, "--collections", "Invalid", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "invalid file path") +} diff --git a/tests/integration/cli/client_backup_import_test.go b/tests/integration/cli/client_backup_import_test.go new file mode 100644 index 0000000000..8290dbe6de --- /dev/null +++ b/tests/integration/cli/client_backup_import_test.go @@ -0,0 +1,109 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package clitest + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBackupImport_WithValidFile_ShouldImport(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + filepath := t.TempDir() + "/test.json" + + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), + 0644, + ) + require.NoError(t, err) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "import", filepath, + }) + stopDefra() + + assertContainsSubstring(t, stdout, "success") +} + +func TestBackupImport_WithExistingDoc_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/test.json" + + err := os.WriteFile( + filepath, + []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), + 0644, + ) + require.NoError(t, err) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "import", filepath, + }) + stopDefra() + + assertContainsSubstring(t, stdout, "a document with the given dockey already exists") +} + +func TestBackupImport_ForInvalidCollection_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/test.json" + + err := os.WriteFile( + filepath, + []byte(`{"Invalid":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), + 0644, + ) + require.NoError(t, err) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "import", filepath, + }) + stopDefra() + + assertContainsSubstring(t, stdout, "failed to get collection: datastore: key not found. Name: Invalid") +} + +func TestBackupImport_InvalidFilePath_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + createUser(t, conf) + + filepath := t.TempDir() + "/some/test.json" + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "backup", "import", filepath, + }) + stopDefra() + + assertContainsSubstring(t, stdout, "invalid file path") +} diff --git a/tests/integration/cli/client_index_create_test.go b/tests/integration/cli/client_index_create_test.go new file mode 100644 index 0000000000..89d6a4a18a --- /dev/null +++ b/tests/integration/cli/client_index_create_test.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package clitest + +import ( + "testing" +) + +func createUserCollection(t *testing.T, conf DefraNodeConfig) { + createCollection(t, conf, `type User { name: String }`) +} + +func createCollection(t *testing.T, conf DefraNodeConfig, colSchema string) { + fileName := schemaFileFixture(t, "schema.graphql", colSchema) + stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fileName}) + assertContainsSubstring(t, stdout, "success") +} + +func TestIndex_IfNoArgs_ShowUsage(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stdout, _ := runDefraCommand(t, conf, []string{"client", "index"}) + assertContainsSubstring(t, stdout, "Usage:") +} + +func TestIndexCreate_IfNoArgs_ShowUsage(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + _, stderr := runDefraCommand(t, conf, []string{"client", "index", "create"}) + assertContainsSubstring(t, stderr, "Usage") +} + +func TestIndexCreate_IfNoFieldsArg_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "index", "create", + "--collection", "User", + }) + stopDefra() + + assertContainsSubstring(t, stderr, "missing argument") +} + +func TestIndexCreate_IfNoCollectionArg_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "index", "create", + "--fields", "Name", + }) + stopDefra() + + assertContainsSubstring(t, stderr, "missing argument") +} + +func TestIndexCreate_IfCollectionExists_ShouldCreateIndex(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "create", + "--collection", "User", + "--fields", "name", + "--name", "users_name_index", + }) + nodeLog := stopDefra() + + jsonResponse := `{"data":{"index":{"Name":"users_name_index","ID":1,"Fields":[{"Name":"name","Direction":"ASC"}]}}}` + assertContainsSubstring(t, stdout, jsonResponse) + assertNotContainsSubstring(t, stdout, "errors") + assertNotContainsSubstring(t, nodeLog, "errors") +} + +func TestIndexCreate_IfInternalError_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "create", + "--collection", "User", + "--fields", "Name", + "--name", "users_name_index", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "errors") +} diff --git a/tests/integration/cli/client_index_drop_test.go b/tests/integration/cli/client_index_drop_test.go new file mode 100644 index 0000000000..ce03e29524 --- /dev/null +++ b/tests/integration/cli/client_index_drop_test.go @@ -0,0 +1,118 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package clitest + +import ( + "testing" +) + +func TestIndexDrop_IfNoArgs_ShowUsage(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + _, stderr := runDefraCommand(t, conf, []string{"client", "index", "drop"}) + assertContainsSubstring(t, stderr, "Usage") +} + +const userColIndexOnNameFieldName = "users_name_index" + +func createIndexOnName(t *testing.T, conf DefraNodeConfig) { + createIndexOnField(t, conf, "User", "name", userColIndexOnNameFieldName) +} + +func createIndexOnField(t *testing.T, conf DefraNodeConfig, colName, fieldName, indexName string) { + runDefraCommand(t, conf, []string{ + "client", "index", "create", + "--collection", colName, + "--fields", fieldName, + "--name", indexName, + }) +} + +func TestIndexDrop_IfNoNameArg_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + createIndexOnName(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "index", "drop", + "--collection", "User", + }) + stopDefra() + + assertContainsSubstring(t, stderr, "missing argument") +} + +func TestIndexDrop_IfNoCollectionArg_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + createIndexOnName(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "index", "drop", + "--name", "users_name_index", + }) + stopDefra() + + assertContainsSubstring(t, stderr, "missing argument") +} + +func TestIndexDrop_IfCollectionWithIndexExists_ShouldDropIndex(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + createIndexOnName(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "drop", + "--collection", "User", + "--name", "users_name_index", + }) + nodeLog := stopDefra() + + jsonResponse := `{"data":{"result":"success"}}` + assertContainsSubstring(t, stdout, jsonResponse) + assertNotContainsSubstring(t, stdout, "errors") + assertNotContainsSubstring(t, nodeLog, "errors") +} + +func TestIndexDrop_IfCollectionDoesNotExist_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "drop", + "--collection", "User", + "--name", "users_name_index", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "errors") +} + +func TestIndexDrop_IfInternalError_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "drop", + "--collection", "User", + "--name", "users_name_index", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "errors") +} diff --git a/tests/integration/cli/client_index_list_test.go b/tests/integration/cli/client_index_list_test.go new file mode 100644 index 0000000000..cb2f7d5fac --- /dev/null +++ b/tests/integration/cli/client_index_list_test.go @@ -0,0 +1,96 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package clitest + +import ( + "encoding/json" + "testing" + + "github.com/sourcenetwork/defradb/client" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIndexList_IfCollectionIsNotSpecified_ShouldReturnAllIndexes(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createCollection(t, conf, `type User { name: String }`) + createCollection(t, conf, `type Product { name: String price: Int }`) + createIndexOnField(t, conf, "User", "name", "") + createIndexOnField(t, conf, "Product", "name", "") + createIndexOnField(t, conf, "Product", "price", "") + + stdout, _ := runDefraCommand(t, conf, []string{"client", "index", "list"}) + nodeLog := stopDefra() + + var resp struct { + Data struct { + Collections map[string][]client.IndexDescription `json:"collections"` + } `json:"data"` + } + err := json.Unmarshal([]byte(stdout[0]), &resp) + require.NoError(t, err) + + assert.Equal(t, len(resp.Data.Collections), 2) + assert.Equal(t, len(resp.Data.Collections["User"]), 1) + assert.Equal(t, len(resp.Data.Collections["Product"]), 2) + + assertNotContainsSubstring(t, stdout, "errors") + assertNotContainsSubstring(t, nodeLog, "errors") +} + +func TestIndexList_IfCollectionIsSpecified_ShouldReturnCollectionsIndexes(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + createUserCollection(t, conf) + createIndexOnName(t, conf) + + createCollection(t, conf, `type Product { name: String price: Int }`) + createIndexOnField(t, conf, "Product", "name", "") + createIndexOnField(t, conf, "Product", "price", "") + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "list", + "--collection", "User", + }) + nodeLog := stopDefra() + + var resp struct { + Data struct { + Indexes []client.IndexDescription `json:"indexes"` + } `json:"data"` + } + err := json.Unmarshal([]byte(stdout[0]), &resp) + require.NoError(t, err) + + expectedDesc := client.IndexDescription{Name: userColIndexOnNameFieldName, ID: 1, Fields: []client.IndexedFieldDescription{{Name: "name", Direction: client.Ascending}}} + assert.Equal(t, 1, len(resp.Data.Indexes)) + assert.Equal(t, expectedDesc, resp.Data.Indexes[0]) + + assertNotContainsSubstring(t, stdout, "errors") + assertNotContainsSubstring(t, nodeLog, "errors") +} + +func TestIndexList_IfInternalError_ShouldFail(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "index", "list", + "--collection", "User", + }) + stopDefra() + + assertContainsSubstring(t, stdout, "errors") +} diff --git a/tests/integration/cli/client_schema_add_test.go b/tests/integration/cli/client_schema_add_test.go index 9d575ed374..12d2e5e539 100644 --- a/tests/integration/cli/client_schema_add_test.go +++ b/tests/integration/cli/client_schema_add_test.go @@ -30,7 +30,7 @@ func TestAddSchemaFromFile(t *testing.T) { nodeLog := stopDefra() - jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja"}],"result":"success"}}` + jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja","version_id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja"}],"result":"success"}}` assert.Contains(t, stdout, jsonReponse) assertNotContainsSubstring(t, nodeLog, "ERROR") } @@ -47,7 +47,7 @@ func TestAddSchemaWithDuplicateType(t *testing.T) { _ = stopDefra() - 
jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a"}],"result":"success"}}` + jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a","version_id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a"}],"result":"success"}}` assertContainsSubstring(t, stdout1, jsonReponse) assertContainsSubstring(t, stdout2, `schema type already exists. Name: Post`) } diff --git a/tests/integration/cli/client_schema_migration_get_test.go b/tests/integration/cli/client_schema_migration_get_test.go new file mode 100644 index 0000000000..dd70879433 --- /dev/null +++ b/tests/integration/cli/client_schema_migration_get_test.go @@ -0,0 +1,110 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package clitest + +import ( + "fmt" + "testing" + + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationGet_GivenOneArg_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "get", + "notAnArg", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "too many arguments. Max: 0, Actual: 1") +} + +func TestSchemaMigrationGet_GivenNoMigrations_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "get", + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, `{"data":{"configuration":[]}}`) +} + +func TestSchemaMigrationGet_GivenEmptyMigrationObj_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", "{}", + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "get", + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, + `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":null}]}}`, + ) +} + +func TestSchemaMigrationGet_GivenEmptyMigration_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", `{"lenses": []}`, + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "get", + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, + `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[]}]}}`, + ) +} + +func TestSchemaMigrationGet_GivenMigration_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", + fmt.Sprintf(`{"lenses": 
[{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "get", + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, + `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[`+ + fmt.Sprintf( + `{"Path":"%s",`, + lenses.SetDefaultModulePath, + )+ + `"Inverse":false,"Arguments":{"dst":"verified","value":true}}`+ + `]}]}}`, + ) +} diff --git a/tests/integration/cli/client_schema_migration_set_test.go b/tests/integration/cli/client_schema_migration_set_test.go new file mode 100644 index 0000000000..d97a4e77d8 --- /dev/null +++ b/tests/integration/cli/client_schema_migration_set_test.go @@ -0,0 +1,244 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package clitest + +import ( + "fmt" + "testing" + + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationSet_GivenEmptyArgs_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{"client", "schema", "migration", "set"}) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") +} + +func TestSchemaMigrationSet_GivenOneArg_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") +} + +func TestSchemaMigrationSet_GivenTwoArgs_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing argument. Name: cfg") +} + +func TestSchemaMigrationSet_GivenFourArgs_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", "cfg", "extraArg", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "too many arguments. Max: 3, Actual: 4") +} + +func TestSchemaMigrationSet_GivenEmptySrcArg_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "", "bae", "path", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing argument. Name: src") +} + +func TestSchemaMigrationSet_GivenEmptyDstArg_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae", "", "path", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing argument. 
Name: dst") +} + +func TestSchemaMigrationSet_GivenEmptyCfgArg_ShouldReturnError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", "", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "missing argument. Name: cfg") +} + +func TestSchemaMigrationSet_GivenInvalidCfgJsonObject_ShouldError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", "{--notvalidjson", + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "invalid lens configuration: invalid character") +} + +func TestSchemaMigrationSet_GivenEmptyCfgObject_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", "{}", + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, "success") +} + +func TestSchemaMigrationSet_GivenCfgWithNoLenses_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", `{"lenses": []}`, + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, "success") +} + +func TestSchemaMigrationSet_GivenCfgWithNoLensesUppercase_ShouldSucceed(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", `{"Lenses": []}`, + }) + _ = stopDefra() + + assertContainsSubstring(t, stdout, "success") +} + +func TestSchemaMigrationSet_GivenCfgWithUnknownProp_ShouldError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", `{"NotAProp": []}`, + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "invalid lens configuration: json: unknown field") +} + +func TestSchemaMigrationSet_GivenCfgWithUnknownPath_ShouldError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + _, stderr := runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bae123", "bae456", `{"Lenses": [{"path":"notAPath"}]}`, + }) + _ = stopDefra() + + assertContainsSubstring(t, stderr, "no such file or directory") +} + +func TestSchemaMigrationSet_GivenCfgWithLenses_ShouldSucceedAndMigrateDoc(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) + assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) + + stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", + `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{ + "client", 
"schema", "migration", "set", + "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { Users { name verified } }"}) + _ = stopDefra() + + assertContainsSubstring(t, stdout, `{"data":[{"name":"John","verified":true}]}`) +} + +func TestSchemaMigrationSet_GivenCfgWithLenseError_ShouldError(t *testing.T) { + conf := NewDefraNodeDefaultConfig(t) + stopDefra := runDefraNode(t, conf) + + stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) + assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) + + stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", + `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{ + "client", "schema", "migration", "set", + "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + // Do not set lens parameters in order to generate error + fmt.Sprintf(`{"lenses": [{"path":"%s"}]}`, lenses.SetDefaultModulePath), + }) + assertContainsSubstring(t, stdout, "success") + + stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { Users { name verified } }"}) + _ = stopDefra() + + // Error generated from within lens module lazily executing within the query + assertContainsSubstring(t, stdout, "Parameters have not been set.") +} diff --git a/tests/integration/cli/utils.go b/tests/integration/cli/utils.go index aba0f8dc88..a5999231a4 100644 --- a/tests/integration/cli/utils.go +++ b/tests/integration/cli/utils.go @@ -34,7 +34,8 @@ import ( "github.com/sourcenetwork/defradb/config" ) -const COMMAND_TIMEOUT_SECONDS = 2 +const COMMAND_TIMEOUT_SECONDS = 2 * time.Second +const SUBCOMMAND_TIME_BUFFER_SECONDS = 200 * time.Millisecond type DefraNodeConfig struct { rootDir string @@ -85,18 +86,21 @@ func runDefraNode(t *testing.T, conf DefraNodeConfig) func() []string { cfg := config.DefaultConfig() ctx, cancel := context.WithCancel(context.Background()) - go func() { + ready := make(chan struct{}) + go func(ready chan struct{}) { defraCmd := cli.NewDefraCommand(cfg) defraCmd.RootCmd.SetArgs( append([]string{"start"}, args...), ) + ready <- struct{}{} err := defraCmd.Execute(ctx) assert.NoError(t, err) - }() - time.Sleep(1 * time.Second) // time buffer for it to start + }(ready) + <-ready + time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) cancelAndOutput := func() []string { cancel() - time.Sleep(1 * time.Second) // time buffer for it to stop + time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) lines, err := readLoglines(t, conf.logPath) assert.NoError(t, err) return lines @@ -115,7 +119,7 @@ func runDefraCommand(t *testing.T, conf DefraNodeConfig, args []string) (stdout, args = append(args, "--rootdir", t.TempDir()) } - ctx, cancel := context.WithTimeout(context.Background(), COMMAND_TIMEOUT_SECONDS*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 
COMMAND_TIMEOUT_SECONDS) defer cancel() stdout, stderr = captureOutput(func() { diff --git a/tests/integration/collection/create/one_to_many/simple_test.go b/tests/integration/collection/create/one_to_many/simple_test.go index 8bd3807ee7..91f12b337e 100644 --- a/tests/integration/collection/create/one_to_many/simple_test.go +++ b/tests/integration/collection/create/one_to_many/simple_test.go @@ -20,7 +20,7 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration/collection" ) -func TestCreateSaveErrorsGivenValueInRelationField(t *testing.T) { +func TestCreateSaveGivenAliasValueInRelationField(t *testing.T) { doc, err := client.NewDocFromJSON( []byte( `{ @@ -41,7 +41,6 @@ func TestCreateSaveErrorsGivenValueInRelationField(t *testing.T) { }, }, }, - ExpectedError: "The given field does not exist", } executeTestCase(t, test) diff --git a/tests/integration/explain/utils.go b/tests/integration/explain.go similarity index 65% rename from tests/integration/explain/utils.go rename to tests/integration/explain.go index 5f94761f66..e4221ea76b 100644 --- a/tests/integration/explain/utils.go +++ b/tests/integration/explain.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package test_explain +package tests import ( "context" @@ -22,13 +22,9 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/logging" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" ) var ( - log = logging.MustNewLogger("tests.integration.explain") - allPlanNodeNames = map[string]struct{}{ // Not a planNode but need it here as this is root of the explain graph. "explain": {}, @@ -77,15 +73,13 @@ type PlanNodeTargetCase struct { ExpectedAttributes any } -type ExplainRequestTestCase struct { - Description string +type ExplainRequest struct { + // NodeID is the node ID (index) of the node in which to explain. + NodeID immutable.Option[int] // Has to be a valid explain request type (one of: 'simple', 'debug', 'execute', 'predict'). Request string - // Docs is a map from Collection Index, to a list of docs in stringified JSON format - Docs map[int][]string - // The raw expected explain graph with everything (helpful for debugging purposes). // Note: This is not always asserted (i.e. ignored from the comparison if not provided). ExpectedFullGraph []map[string]any @@ -106,102 +100,60 @@ type ExplainRequestTestCase struct { ExpectedError string } -type databaseInfo struct { - name testUtils.DatabaseType - db client.DB -} - -func ExecuteExplainRequestTestCase( - t *testing.T, - schema string, - collectionNames []string, - explainTest ExplainRequestTestCase, +func executeExplainRequest( + s *state, + action ExplainRequest, ) { - if testUtils.DetectDbChanges && testUtils.DetectDbChangesPreTestChecks(t, collectionNames) { - return - } - // Must have a non-empty request. - if explainTest.Request == "" { - require.Fail(t, "Explain test must have a non-empty request.", explainTest.Description) + if action.Request == "" { + require.Fail(s.t, "Explain test must have a non-empty request.", s.testCase.Description) } // If no expected results are provided, then it's invalid use of this explain testing setup. 
- if explainTest.ExpectedError == "" && - explainTest.ExpectedPatterns == nil && - explainTest.ExpectedTargets == nil && - explainTest.ExpectedFullGraph == nil { - require.Fail(t, "Atleast one expected explain parameter must be provided.", explainTest.Description) + if action.ExpectedError == "" && + action.ExpectedPatterns == nil && + action.ExpectedTargets == nil && + action.ExpectedFullGraph == nil { + require.Fail(s.t, "Atleast one expected explain parameter must be provided.", s.testCase.Description) } // If we expect an error, then all other expected results should be empty (they shouldn't be provided). - if explainTest.ExpectedError != "" && - (explainTest.ExpectedFullGraph != nil || - explainTest.ExpectedPatterns != nil || - explainTest.ExpectedTargets != nil) { - require.Fail(t, "Expected error should not have other expected results with it.", explainTest.Description) - } - - ctx := context.Background() - dbs, err := getDatabases(ctx, t) - if testUtils.AssertError(t, explainTest.Description, err, explainTest.ExpectedError) { - return + if action.ExpectedError != "" && + (action.ExpectedFullGraph != nil || + action.ExpectedPatterns != nil || + action.ExpectedTargets != nil) { + require.Fail(s.t, "Expected error should not have other expected results with it.", s.testCase.Description) } - require.NotEmpty(t, dbs) - - for _, dbi := range dbs { - db := dbi.db - log.Info(ctx, explainTest.Description, logging.NewKV("Database", dbi.name)) - - if testUtils.DetectDbChanges { - t.SkipNow() - return - } - - setupDatabase( - ctx, - t, - dbi, - schema, - collectionNames, - explainTest.Description, - explainTest.ExpectedError, - explainTest.Docs, - immutable.None[map[int]map[int][]string](), - ) - result := db.ExecRequest(ctx, explainTest.Request) - if assertExplainRequestResults( - ctx, - t, + for _, node := range getNodes(action.NodeID, s.nodes) { + result := node.DB.ExecRequest(s.ctx, action.Request) + assertExplainRequestResults( + s.ctx, + s.t, + s.testCase.Description, &result.GQL, - explainTest, - ) { - continue - } - - if explainTest.ExpectedError != "" { - assert.Fail(t, "Expected an error however none was raised.", explainTest.Description) - } - - db.Close(ctx) + action, + ) } } func assertExplainRequestResults( ctx context.Context, t *testing.T, + description string, actualResult *client.GQLResult, - explainTest ExplainRequestTestCase, -) bool { - // Check expected error matches actual error. - if testUtils.AssertErrors( + action ExplainRequest, +) { + // Check expected error matches actual error. If it does we are done. + if AssertErrors( t, - explainTest.Description, + description, actualResult.Errors, - explainTest.ExpectedError, + action.ExpectedError, ) { - return true + return + } else if action.ExpectedError != "" { // If didn't find a match but did expected an error, then fail. + assert.Fail(t, "Expected an error however none was raised.", description) } // Note: if returned gql result is `nil` this panics (the panic seems useful while testing). @@ -210,15 +162,15 @@ func assertExplainRequestResults( // Check if the expected full explain graph (if provided) matches the actual full explain graph // that is returned, if doesn't match we would like to still see a diff comparison (handy while debugging). 
- if lengthOfExpectedFullGraph := len(explainTest.ExpectedFullGraph); explainTest.ExpectedFullGraph != nil { - require.Equal(t, lengthOfExpectedFullGraph, len(resultantData), explainTest.Description) + if lengthOfExpectedFullGraph := len(action.ExpectedFullGraph); action.ExpectedFullGraph != nil { + require.Equal(t, lengthOfExpectedFullGraph, len(resultantData), description) for index, actualResult := range resultantData { if lengthOfExpectedFullGraph > index { assert.Equal( t, - explainTest.ExpectedFullGraph[index], + action.ExpectedFullGraph[index], actualResult, - explainTest.Description, + description, ) } } @@ -226,29 +178,27 @@ func assertExplainRequestResults( // Ensure the complete high-level pattern matches, inother words check that all the // explain graph nodes are in the correct expected ordering. - if explainTest.ExpectedPatterns != nil { - require.Equal(t, len(explainTest.ExpectedPatterns), len(resultantData), explainTest.Description) + if action.ExpectedPatterns != nil { + require.Equal(t, len(action.ExpectedPatterns), len(resultantData), description) for index, actualResult := range resultantData { // Trim away all attributes (non-plan nodes) from the returned full explain graph result. - actualResultWithoutAttributes := trimExplainAttributes(t, explainTest.Description, actualResult) + actualResultWithoutAttributes := trimExplainAttributes(t, description, actualResult) assert.Equal( t, - explainTest.ExpectedPatterns[index], + action.ExpectedPatterns[index], actualResultWithoutAttributes, - explainTest.Description, + description, ) } } // Match the targeted node's attributes (subset assertions), with the expected attributes. // Note: This does not check if the node is in correct location or not. - if explainTest.ExpectedTargets != nil { - for _, target := range explainTest.ExpectedTargets { - assertExplainTargetCase(t, explainTest.Description, target, resultantData) + if action.ExpectedTargets != nil { + for _, target := range action.ExpectedTargets { + assertExplainTargetCase(t, description, target, resultantData) } } - - return false } func assertExplainTargetCase( @@ -470,91 +420,3 @@ func copyMap(originalMap map[string]any) map[string]any { } return newMap } - -func getDatabases(ctx context.Context, t *testing.T) ([]databaseInfo, error) { - databases := []databaseInfo{} - - for _, dbt := range testUtils.GetDatabaseTypes() { - db, _, err := testUtils.GetDatabase(ctx, t, dbt) - if err != nil { - return nil, err - } - - databases = append( - databases, - databaseInfo{ - name: dbt, - db: db, - }, - ) - } - - return databases, nil -} - -// setupDatabase is persisted for the sake of the explain tests as they use a different -// test executor that calls this function. 
-func setupDatabase( - ctx context.Context, - t *testing.T, - dbi databaseInfo, - schema string, - collectionNames []string, - description string, - expectedError string, - documents map[int][]string, - updates immutable.Option[map[int]map[int][]string], -) { - db := dbi.db - _, err := db.AddSchema(ctx, schema) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - - collections := []client.Collection{} - for _, collectionName := range collectionNames { - col, err := db.GetCollectionByName(ctx, collectionName) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - collections = append(collections, col) - } - - // insert docs - for collectionIndex, docs := range documents { - hasCollectionUpdates := false - collectionUpdates := map[int][]string{} - - if updates.HasValue() { - collectionUpdates, hasCollectionUpdates = updates.Value()[collectionIndex] - } - - for documentIndex, docStr := range docs { - doc, err := client.NewDocFromJSON([]byte(docStr)) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - err = collections[collectionIndex].Save(ctx, doc) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - - if hasCollectionUpdates { - documentUpdates, hasDocumentUpdates := collectionUpdates[documentIndex] - - if hasDocumentUpdates { - for _, u := range documentUpdates { - err = doc.SetWithJSON([]byte(u)) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - err = collections[collectionIndex].Save(ctx, doc) - if testUtils.AssertError(t, description, err, expectedError) { - return - } - } - } - } - } - } -} diff --git a/tests/integration/explain/debug/basic_test.go b/tests/integration/explain/debug/basic_test.go new file mode 100644 index 0000000000..f97305d091 --- /dev/null +++ b/tests/integration/explain/debug/basic_test.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequest(t *testing.T) { + test := testUtils.TestCase{ + Description: "Explain (debug) a basic request, assert full graph.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + Request: `query @explain(type: debug) { + Author { + name + age + } + }`, + + ExpectedFullGraph: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/create_test.go b/tests/integration/explain/debug/create_test.go new file mode 100644 index 0000000000..029c0eaefe --- /dev/null +++ b/tests/integration/explain/debug/create_test.go @@ -0,0 +1,78 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var createPattern = dataMap{ + "explain": dataMap{ + "createNode": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainMutationRequestWithCreate(t *testing.T) { + test := testUtils.TestCase{ + Description: "Explain (debug) mutation request with create.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27,\"verified\": true}") { + name + age + } + }`, + + ExpectedPatterns: []dataMap{createPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestDoesNotCreateDocGivenDuplicate(t *testing.T) { + test := testUtils.TestCase{ + Description: "Explain (debug) mutation request with create, document exists.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27}") { + name + age + } + }`, + + ExpectedPatterns: []dataMap{createPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/dagscan_test.go b/tests/integration/explain/debug/dagscan_test.go new file mode 100644 index 0000000000..647d378907 --- /dev/null +++ b/tests/integration/explain/debug/dagscan_test.go @@ -0,0 +1,188 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var dagScanPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "dagScanNode": dataMap{}, + }, + }, + }, +} + +func TestDebugExplainCommitsDagScanQueryOp(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) commits query-op.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + links { + cid + } + } + }`, + + ExpectedFullGraph: []dataMap{dagScanPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) commits query-op with only dockey (no field).", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + links { + cid + } + } + }`, + + ExpectedFullGraph: []dataMap{dagScanPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainLatestCommitsDagScanQueryOp(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) latestCommits query-op.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + cid + links { + cid + } + } + }`, + + ExpectedFullGraph: []dataMap{dagScanPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) latestCommits query-op with only dockey (no field).", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + cid + links { + cid + } + } + }`, + + ExpectedFullGraph: []dataMap{dagScanPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainLatestCommitsDagScanWithoutDocKey_Failure(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) latestCommits query without DocKey.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + latestCommits(fieldId: "1") { + cid + links { + cid + } + } + }`, + + ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainLatestCommitsDagScanWithoutAnyArguments_Failure(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) latestCommits query without any arguments.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + latestCommits { + cid + links { + cid + } + } + }`, + + ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff 
--git a/tests/integration/explain/debug/delete_test.go b/tests/integration/explain/debug/delete_test.go new file mode 100644 index 0000000000..083c6163c2 --- /dev/null +++ b/tests/integration/explain/debug/delete_test.go @@ -0,0 +1,185 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var deletePattern = dataMap{ + "explain": dataMap{ + "deleteNode": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author(filter: {name: {_eq: "Shahzad"}}) { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithDeleteUsingFilterToMatchEverything(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using filter to match everything.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author(filter: {}) { + DeletedKeyByFilter: _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithDeleteUsingId(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using id.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author(id: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithDeleteUsingIds(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using ids.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author(ids: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ]) { + AliasKey: _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithDeleteUsingNoIds(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using no ids.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author(ids: []) { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + 
explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithDeleteUsingFilterAndIds(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete using filter and ids.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author( + ids: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", "test"], + filter: { + _and: [ + {age: {_lt: 26}}, + {verified: {_eq: true}}, + ] + } + ) { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/delete_with_error_test.go b/tests/integration/explain/debug/delete_with_error_test.go new file mode 100644 index 0000000000..208106e098 --- /dev/null +++ b/tests/integration/explain/debug/delete_with_error_test.go @@ -0,0 +1,45 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainMutationRequestWithDeleteHavingNoSubSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with delete having no sub-selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + delete_Author( + ids: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ] + ) + }`, + + ExpectedError: "Field \"delete_Author\" of type \"[Author]\" must have a sub selection.", + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/fixture.go b/tests/integration/explain/debug/fixture.go new file mode 100644 index 0000000000..f77c0a3839 --- /dev/null +++ b/tests/integration/explain/debug/fixture.go @@ -0,0 +1,23 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +type dataMap = map[string]any + +var basicPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, +} diff --git a/tests/integration/explain/debug/group_test.go b/tests/integration/explain/debug/group_test.go new file mode 100644 index 0000000000..af738627b8 --- /dev/null +++ b/tests/integration/explain/debug/group_test.go @@ -0,0 +1,86 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var groupPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "pipeNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithGroupByOnParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with group-by on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [age]) { + age + _group { + name + } + } + }`, + + ExpectedFullGraph: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithGroupByTwoFieldsOnParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with group-by two fields on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [age, name]) { + age + _group { + name + } + } + }`, + + ExpectedFullGraph: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_average_test.go b/tests/integration/explain/debug/group_with_average_test.go new file mode 100644 index 0000000000..e001c1a529 --- /dev/null +++ b/tests/integration/explain/debug/group_with_average_test.go @@ -0,0 +1,157 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugGroupAveragePattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "pipeNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithGroupByWithAverageOnAnInnerField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with group-by with average on inner field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [name]) { + name + _avg(_group: {field: age}) + } + }`, + + ExpectedPatterns: []dataMap{debugGroupAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAverageInsideTheInnerGroupOnAField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with group-by with average of the inner _group on a field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAverageInsideTheInnerGroupOnAFieldAndNestedGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with group-by with average of the inner _group on a field and nested group-by.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + _group (groupBy: [age]){ + age + } + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAverageInsideTheInnerGroupAndNestedGroupByWithAverage(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with average inside the inner _group and nested groupBy with average.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + _group (groupBy: [age]){ + age + _avg(_group: {field: age}) + } + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_dockey_child_test.go b/tests/integration/explain/debug/group_with_dockey_child_test.go new file mode 100644 index 0000000000..bc6555b961 --- /dev/null +++ b/tests/integration/explain/debug/group_with_dockey_child_test.go @@ -0,0 +1,47 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithDockeysOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockeys on inner _group.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [age] + ) { + age + _group(dockeys: ["bae-6a4c5bc5-b044-5a03-a868-8260af6f2254"]) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_dockey_test.go b/tests/integration/explain/debug/group_with_dockey_test.go new file mode 100644 index 0000000000..fc53731c6a --- /dev/null +++ b/tests/integration/explain/debug/group_with_dockey_test.go @@ -0,0 +1,82 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithDockeyOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with a dockey on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [age], + dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" + ) { + age + _group { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithDockeysAndFilterOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockeys and filter on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [age], + filter: {age: {_eq: 20}}, + dockeys: [ + "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", + "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" + ] + ) { + age + _group { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_filter_child_test.go b/tests/integration/explain/debug/group_with_filter_child_test.go new file mode 100644 index 0000000000..61a8a72f3e --- /dev/null +++ b/tests/integration/explain/debug/group_with_filter_child_test.go @@ -0,0 +1,75 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithFilterOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with filter on the inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author (groupBy: [age]) { + age + _group(filter: {age: {_gt: 63}}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithFilterOnParentGroupByAndInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with filter on parent groupBy and on the inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author ( + groupBy: [age], + filter: {age: {_gt: 62}} + ) { + age + _group(filter: {age: {_gt: 63}}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_filter_test.go b/tests/integration/explain/debug/group_with_filter_test.go new file mode 100644 index 0000000000..b55d798c3a --- /dev/null +++ b/tests/integration/explain/debug/group_with_filter_test.go @@ -0,0 +1,48 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithFilterOnGroupByParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with filter on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author ( + groupBy: [age], + filter: {age: {_gt: 63}} + ) { + age + _group { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_limit_child_test.go b/tests/integration/explain/debug/group_with_limit_child_test.go new file mode 100644 index 0000000000..335c8866f0 --- /dev/null +++ b/tests/integration/explain/debug/group_with_limit_child_test.go @@ -0,0 +1,75 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithLimitAndOffsetOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit and offset on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(groupBy: [name]) { + name + _group(limit: 2, offset: 1) { + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLimitAndOffsetOnMultipleInnerGroupSelections(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit and offset on multiple inner _group selections.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(groupBy: [name]) { + name + innerFirstGroup: _group(limit: 1, offset: 2) { + age + } + innerSecondGroup: _group(limit: 2) { + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_limit_test.go b/tests/integration/explain/debug/group_with_limit_test.go new file mode 100644 index 0000000000..82c84969fc --- /dev/null +++ b/tests/integration/explain/debug/group_with_limit_test.go @@ -0,0 +1,95 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugGroupLimitPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "pipeNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithLimitAndOffsetOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit and offset on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [name], + limit: 1, + offset: 1 + ) { + name + _group { + age + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupLimitPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLimitOnParentGroupByAndInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit on parent groupBy and inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [name], + limit: 1 + ) { + name + _group(limit: 2) { + age + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupLimitPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_order_child_test.go b/tests/integration/explain/debug/group_with_order_child_test.go new file mode 100644 index 0000000000..19d97424dc --- /dev/null +++ b/tests/integration/explain/debug/group_with_order_child_test.go @@ -0,0 +1,105 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithDescendingOrderOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order (descending) on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(groupBy: [name]) { + name + _group (order: {age: DESC}){ + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAscendingOrderOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order (ascending) on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(groupBy: [name]) { + name + _group (order: {age: ASC}){ + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithOrderOnNestedParentGroupByAndOnNestedParentsInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order on nested parent groupBy and on nested parent's inner _group.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(groupBy: [name]) { + name + _group ( + groupBy: [verified], + order: {verified: ASC} + ){ + verified + _group (order: {age: DESC}) { + age + } + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/group_with_order_test.go b/tests/integration/explain/debug/group_with_order_test.go new file mode 100644 index 0000000000..a7e3b717b0 --- /dev/null +++ b/tests/integration/explain/debug/group_with_order_test.go @@ -0,0 +1,124 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugGroupOrderPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "pipeNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithDescendingOrderOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order (descending) on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [name], + order: {name: DESC} + ) { + name + _group { + age + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupOrderPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAscendingOrderOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order (ascending) on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [name], + order: {name: ASC} + ) { + name + _group { + age + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupOrderPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithOrderOnParentGroupByAndOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order on parent groupBy and inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + groupBy: [name], + order: {name: DESC} + ) { + name + _group (order: {age: ASC}){ + age + } + } + }`, + + ExpectedPatterns: []dataMap{debugGroupOrderPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/top_with_average_test.go b/tests/integration/explain/debug/top_with_average_test.go new file mode 100644 index 0000000000..de16b72c4b --- /dev/null +++ b/tests/integration/explain/debug/top_with_average_test.go @@ -0,0 +1,98 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var topLevelAveragePattern = dataMap{ + "explain": dataMap{ + "topLevelNode": []dataMap{ + { + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + { + "sumNode": dataMap{}, + }, + { + "countNode": dataMap{}, + }, + { + "averageNode": dataMap{}, + }, + }, + }, +} + +func TestDebugExplainTopLevelAverageRequest(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level average request.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _avg( + Author: { + field: age + } + ) + }`, + + ExpectedPatterns: []dataMap{topLevelAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainTopLevelAverageRequestWithFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level average request with filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _avg( + Author: { + field: age, + filter: { + age: { + _gt: 26 + } + } + } + ) + }`, + + ExpectedPatterns: []dataMap{topLevelAveragePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/top_with_count_test.go b/tests/integration/explain/debug/top_with_count_test.go new file mode 100644 index 0000000000..cceb3d1467 --- /dev/null +++ b/tests/integration/explain/debug/top_with_count_test.go @@ -0,0 +1,87 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var topLevelCountPattern = dataMap{ + "explain": dataMap{ + "topLevelNode": []dataMap{ + { + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + { + "countNode": dataMap{}, + }, + }, + }, +} + +func TestDebugExplainTopLevelCountRequest(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level count request.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _count(Author: {}) + }`, + + ExpectedPatterns: []dataMap{topLevelCountPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainTopLevelCountRequestWithFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level count request with filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _count( + Author: { + filter: { + age: { + _gt: 26 + } + } + } + ) + }`, + + ExpectedPatterns: []dataMap{topLevelCountPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/top_with_sum_test.go b/tests/integration/explain/debug/top_with_sum_test.go new file mode 100644 index 0000000000..ba2381a7cf --- /dev/null +++ b/tests/integration/explain/debug/top_with_sum_test.go @@ -0,0 +1,92 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var topLevelSumPattern = dataMap{ + "explain": dataMap{ + "topLevelNode": []dataMap{ + { + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + { + "sumNode": dataMap{}, + }, + }, + }, +} + +func TestDebugExplainTopLevelSumRequest(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level sum request.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _sum( + Author: { + field: age + } + ) + }`, + + ExpectedPatterns: []dataMap{topLevelSumPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainTopLevelSumRequestWithFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) top-level sum request with filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + _sum( + Author: { + field: age, + filter: { + age: { + _gt: 26 + } + } + } + ) + }`, + + ExpectedPatterns: []dataMap{topLevelSumPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/type_join_many_test.go b/tests/integration/explain/debug/type_join_many_test.go new file mode 100644 index 0000000000..85ca3399a5 --- /dev/null +++ b/tests/integration/explain/debug/type_join_many_test.go @@ -0,0 +1,56 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithAOneToManyJoin(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with a 1-to-M join.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + articles { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/type_join_one_test.go b/tests/integration/explain/debug/type_join_one_test.go new file mode 100644 index 0000000000..55b1bc9b57 --- /dev/null +++ b/tests/integration/explain/debug/type_join_one_test.go @@ -0,0 +1,111 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithAOneToOneJoin(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with a 1-to-1 join.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + OnlyEmail: contact { + email + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinOne": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithTwoLevelDeepNestedJoins(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with two level deep nested joins.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + contact { + email + address { + city + } + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinOne": dataMap{ + "root": dataMap{ + "scanNode": dataMap{}, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinOne": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/type_join_test.go b/tests/integration/explain/debug/type_join_test.go new file mode 100644 index 0000000000..21608521c4 --- /dev/null +++ b/tests/integration/explain/debug/type_join_test.go @@ -0,0 +1,105 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var normalTypeJoinPattern = dataMap{ + "root": dataMap{ + "scanNode": dataMap{}, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, +} + +var debugTypeJoinPattern = dataMap{ + "root": dataMap{ + "multiScanNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, +} + +func TestDebugExplainRequestWith2SingleJoinsAnd1ManyJoin(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with 2 single joins and 1 many join.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + OnlyEmail: contact { + email + } + articles { + name + } + contact { + cell + email + } + } + }`, + + ExpectedFullGraph: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinOne": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinOne": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/type_join_with_filter_and_key_test.go b/tests/integration/explain/debug/type_join_with_filter_and_key_test.go new file mode 100644 index 0000000000..5219c5c874 --- /dev/null +++ b/tests/integration/explain/debug/type_join_with_filter_and_key_test.go @@ -0,0 +1,117 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithRelatedAndRegularFilterAndKeys(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with related and regular filter + keys.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + filter: { + name: {_eq: "John Grisham"}, + books: {name: {_eq: "Painted House"}} + }, + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f8e" + ] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithManyRelatedFiltersAndKey(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with many related filters + key.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + filter: { + name: {_eq: "Cornelia Funke"}, + articles: {name: {_eq: "To my dear readers"}}, + books: {name: {_eq: "Theif Lord"}} + }, + dockeys: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/type_join_with_filter_test.go b/tests/integration/explain/debug/type_join_with_filter_test.go new file mode 100644 index 0000000000..2d4940b5bc --- /dev/null +++ b/tests/integration/explain/debug/type_join_with_filter_test.go @@ -0,0 +1,112 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithRelatedAndRegularFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with related and regular filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + filter: { + name: {_eq: "John Grisham"}, + books: {name: {_eq: "Painted House"}} + } + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithManyRelatedFilters(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with many related filters.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + filter: { + name: {_eq: "Cornelia Funke"}, + articles: {name: {_eq: "To my dear readers"}}, + books: {name: {_eq: "Theif Lord"}} + } + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/update_test.go b/tests/integration/explain/debug/update_test.go new file mode 100644 index 0000000000..8a479837d6 --- /dev/null +++ b/tests/integration/explain/debug/update_test.go @@ -0,0 +1,161 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var updatePattern = dataMap{ + "explain": dataMap{ + "updateNode": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainMutationRequestWithUpdateUsingBooleanFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with update using boolean filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + update_Author( + filter: { + verified: { + _eq: true + } + }, + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithUpdateUsingIds(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with update using ids.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + update_Author( + ids: [ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ], + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithUpdateUsingId(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with update using id.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + update_Author( + id: "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainMutationRequestWithUpdateUsingIdsAndFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) mutation request with update using both ids and filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain(type: debug) { + update_Author( + filter: { + verified: { + _eq: true + } + }, + ids: [ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ], + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_average_join_test.go b/tests/integration/explain/debug/with_average_join_test.go new file mode 100644 index 0000000000..63d910d117 --- /dev/null +++ b/tests/integration/explain/debug/with_average_join_test.go @@ -0,0 +1,116 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugAverageTypeIndexJoinManyPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithAverageOnJoinedField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with average on joined/related field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + _avg(books: {field: pages}) + } + }`, + + ExpectedPatterns: []dataMap{debugAverageTypeIndexJoinManyPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithAverageOnMultipleJoinedFieldsWithFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with average on multiple joined fields with filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + _avg( + books: {field: pages}, + articles: {field: pages, filter: {pages: {_gt: 3}}} + ) + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_average_test.go b/tests/integration/explain/debug/with_average_test.go new file mode 100644 index 0000000000..9bc0f70194 --- /dev/null +++ b/tests/integration/explain/debug/with_average_test.go @@ -0,0 +1,59 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var averagePattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithAverageOnArrayField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with average on array field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Book { + name + _avg(chapterPages: {}) + } + }`, + + ExpectedFullGraph: []dataMap{averagePattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_count_join_test.go b/tests/integration/explain/debug/with_count_join_test.go new file mode 100644 index 0000000000..ae96eeb2c5 --- /dev/null +++ b/tests/integration/explain/debug/with_count_join_test.go @@ -0,0 +1,108 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugCountTypeIndexJoinManyPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithCountOnOneToManyJoinedField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with count on a one-to-many joined field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + numberOfBooks: _count(books: {}) + } + }`, + + ExpectedPatterns: []dataMap{debugCountTypeIndexJoinManyPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithCountOnOneToManyJoinedFieldWithManySources(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with count on a one-to-many joined field with many sources.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + numberOfBooks: _count( + books: {} + articles: {} + ) + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_count_test.go b/tests/integration/explain/debug/with_count_test.go new file mode 100644 index 
0000000000..ec542bed8e --- /dev/null +++ b/tests/integration/explain/debug/with_count_test.go @@ -0,0 +1,55 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var countPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithCountOnInlineArrayField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with count on an inline array field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Book { + name + _count(chapterPages: {}) + } + }`, + + ExpectedPatterns: []dataMap{countPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_filter_key_test.go b/tests/integration/explain/debug/with_filter_key_test.go new file mode 100644 index 0000000000..5ca0939150 --- /dev/null +++ b/tests/integration/explain/debug/with_filter_key_test.go @@ -0,0 +1,159 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithDocKeyFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockey filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(dockey: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithDocKeysFilterUsingOneKey(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockeys filter using one key.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(dockeys: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"]) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithDocKeysFilterUsingMultipleButDuplicateKeys(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockeys filter using multiple but duplicate keys.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithDocKeysFilterUsingMultipleUniqueKeys(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with dockeys filter using multiple unique keys.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithMatchingKeyFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with a filter to match key.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + filter: { + _key: { + _eq: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + } + } + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_filter_test.go b/tests/integration/explain/debug/with_filter_test.go new file mode 100644 index 0000000000..67f69a406c --- /dev/null +++ b/tests/integration/explain/debug/with_filter_test.go @@ -0,0 +1,168 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithStringEqualFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with string equal (_eq) filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {name: {_eq: "Lone"}}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithIntegerEqualFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with integer equal (_eq) filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {age: {_eq: 26}}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithGreaterThanFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with greater than (_gt) filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {age: {_gt: 20}}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLogicalCompoundAndFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with logical compound (_and) filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {_and: [{age: {_gt: 20}}, {age: {_lt: 50}}]}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLogicalCompoundOrFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with logical compound (_or) filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {_or: [{age: {_eq: 55}}, {age: {_eq: 19}}]}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithMatchInsideList(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request filtering values that match within (_in) a list.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(filter: {age: {_in: [19, 40, 55]}}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_limit_count_test.go b/tests/integration/explain/debug/with_limit_count_test.go new file mode 100644 index 0000000000..84327fdaae 
--- /dev/null +++ b/tests/integration/explain/debug/with_limit_count_test.go @@ -0,0 +1,120 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDebugExplainRequestWithOnlyLimitOnRelatedChildWithCount(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit on related child with count.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + numberOfArts: _count(articles: {}) + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugLimitTypeJoinManyPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLimitArgsOnParentAndRelatedChildWithCount(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit args on parent and related child with count.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(limit: 3, offset: 1) { + numberOfArts: _count(articles: {}) + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugLimitTypeJoinManyPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_limit_join_test.go b/tests/integration/explain/debug/with_limit_join_test.go new file mode 100644 index 0000000000..028ac2f69b --- /dev/null +++ b/tests/integration/explain/debug/with_limit_join_test.go @@ -0,0 +1,208 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var limitTypeJoinPattern = dataMap{ + "root": dataMap{ + "scanNode": dataMap{}, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +var debugLimitTypeJoinManyPattern = dataMap{ + "root": dataMap{ + "multiScanNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithOnlyLimitOnRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with only limit on related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + articles(limit: 1) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": limitTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithOnlyOffsetOnRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with only offset on related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + articles(offset: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": limitTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithBothLimitAndOffsetOnRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with both limit and offset on related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + articles(limit: 2, offset: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": limitTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLimitOnRelatedChildAndBothLimitAndOffsetOnParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit on related child & both limit + offset on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(limit: 3, offset: 1) { + name + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": limitTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_limit_test.go b/tests/integration/explain/debug/with_limit_test.go new 
file mode 100644 index 0000000000..993032a364 --- /dev/null +++ b/tests/integration/explain/debug/with_limit_test.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var limitPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithOnlyLimit(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with only limit.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(limit: 2) { + name + } + }`, + + ExpectedPatterns: []dataMap{limitPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithOnlyOffset(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with only offset.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(offset: 2) { + name + } + }`, + + ExpectedPatterns: []dataMap{limitPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithLimitAndOffset(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with limit and offset.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(limit: 3, offset: 1) { + name + } + }`, + + ExpectedPatterns: []dataMap{limitPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_order_join_test.go b/tests/integration/explain/debug/with_order_join_test.go new file mode 100644 index 0000000000..02b96496e6 --- /dev/null +++ b/tests/integration/explain/debug/with_order_join_test.go @@ -0,0 +1,143 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var orderTypeJoinPattern = dataMap{ + "root": dataMap{ + "scanNode": dataMap{}, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithOrderFieldOnRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order field on a related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + articles(order: {name: DESC}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": orderTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithOrderFieldOnParentAndRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with order field on parent and related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(order: {name: ASC}) { + name + articles(order: {name: DESC}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": orderTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWhereParentIsOrderedByItsRelatedChild(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request where parent is ordered by it's related child.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author( + order: { + articles: {name: ASC} + } + ) { + articles { + name + } + } + }`, + + ExpectedError: "Argument \"order\" has invalid value {articles: {name: ASC}}.\nIn field \"articles\": Unknown field.", + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_order_test.go b/tests/integration/explain/debug/with_order_test.go new file mode 100644 index 0000000000..cdcdfe35ac --- /dev/null +++ b/tests/integration/explain/debug/with_order_test.go @@ -0,0 +1,80 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var orderPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithAscendingOrderOnParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with ascending order on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(order: {age: ASC}) { + name + age + } + }`, + + ExpectedFullGraph: []dataMap{orderPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with multiple order fields on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author(order: {name: ASC, age: DESC}) { + name + age + } + }`, + + ExpectedFullGraph: []dataMap{orderPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_sum_join_test.go b/tests/integration/explain/debug/with_sum_join_test.go new file mode 100644 index 0000000000..66c68be8ed --- /dev/null +++ b/tests/integration/explain/debug/with_sum_join_test.go @@ -0,0 +1,145 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var debugSumTypeIndexJoinManyPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "typeJoinMany": normalTypeJoinPattern, + }, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithSumOnOneToManyJoinedField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with sum on a one-to-many joined field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + _key + TotalPages: _sum( + books: {field: pages} + ) + } + }`, + + ExpectedPatterns: []dataMap{debugSumTypeIndexJoinManyPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithSumOnOneToManyJoinedFieldWithFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with sum on a one-to-many joined field, with filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + TotalPages: _sum( + articles: { + field: pages, + filter: { + name: { + _eq: "To my dear readers" + } + } + } + ) + } + }`, + + ExpectedPatterns: []dataMap{debugSumTypeIndexJoinManyPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDebugExplainRequestWithSumOnOneToManyJoinedFieldWithManySources(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with sum on a one-to-many joined field with many sources.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Author { + name + TotalPages: _sum( + books: {field: pages}, + articles: {field: pages} + ) + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + { + "typeIndexJoin": dataMap{ + "typeJoinMany": debugTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/debug/with_sum_test.go b/tests/integration/explain/debug/with_sum_test.go new file mode 100644 index 0000000000..00ed6ddcc7 --- /dev/null +++ b/tests/integration/explain/debug/with_sum_test.go @@ -0,0 +1,55 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_debug + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +var sumPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} + +func TestDebugExplainRequestWithSumOnInlineArrayField_ChildFieldWillBeEmpty(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (debug) request with sum on an inline array field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: debug) { + Book { + name + NotSureWhySomeoneWouldSumTheChapterPagesButHereItIs: _sum(chapterPages: {}) + } + }`, + + ExpectedPatterns: []dataMap{sumPattern}, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/basic_test.go b/tests/integration/explain/default/basic_test.go index 1cc6b91331..2d7f515d9e 100644 --- a/tests/integration/explain/default/basic_test.go +++ b/tests/integration/explain/default/basic_test.go @@ -13,71 +13,69 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainOnWrongFieldDirective_BadUsage(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) a request by providing the directive on wrong location (field).", - Request: `query { - Author @explain { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query { + Author @explain { + name + age + } }`, + + ExpectedError: "Directive \"explain\" may not be used on FIELD.", }, }, - - ExpectedError: "Directive \"explain\" may not be used on FIELD.", } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithFullBasicGraph(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) a basic request.", - Request: `query @explain { - Author { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author { + name + age + } }`, - }, - }, - ExpectedFullGraph: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedFullGraph: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -88,32 +86,30 @@ func TestDefaultExplainRequestWithFullBasicGraph(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainWithAlias(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) a basic request with alias, no 
filter", - Request: `query @explain { - Author { - username: name - age: age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author { + username: name + age: age + } }`, + + ExpectedPatterns: []dataMap{basicPattern}, }, }, - - ExpectedPatterns: []dataMap{basicPattern}, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/create_test.go b/tests/integration/explain/default/create_test.go index 45334b9e26..eaecd7879f 100644 --- a/tests/integration/explain/default/create_test.go +++ b/tests/integration/explain/default/create_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,69 +30,76 @@ var createPattern = dataMap{ } func TestDefaultExplainMutationRequestWithCreate(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with create.", - Request: `mutation @explain { - create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27,\"verified\": true}") { - name - age - } - }`, - - ExpectedPatterns: []dataMap{createPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "createNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(27), - "name": "Shahzad Lone", - "verified": true, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain { + create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27,\"verified\": true}") { + name + age + } + }`, + + ExpectedPatterns: []dataMap{createPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "createNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(27), + "name": "Shahzad Lone", + "verified": true, + }, + }, }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestDoesNotCreateDocGivenDuplicate(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with create, document exists.", - Request: `mutation @explain { - create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27}") { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "Shahzad Lone", - "age": 27 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain { + create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27}") { + name + age + } }`, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "createNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(27), - "name": "Shahzad Lone", + ExpectedPatterns: []dataMap{createPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "createNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(27), + "name": "Shahzad Lone", + }, + }, }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/dagscan_test.go 
b/tests/integration/explain/default/dagscan_test.go index 2befaf0cdd..a83402bb67 100644 --- a/tests/integration/explain/default/dagscan_test.go +++ b/tests/integration/explain/default/dagscan_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -27,49 +28,38 @@ var dagScanPattern = dataMap{ } func TestDefaultExplainCommitsDagScanQueryOp(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) commits query-op.", - Request: `query @explain { - commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { - links { - cid - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + links { + cid + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{dagScanPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "dagScanNode", - IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. - ExpectedAttributes: dataMap{ - "cid": nil, - "fieldId": "1", - "spans": []dataMap{ - { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + ExpectedPatterns: []dataMap{dagScanPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "dagScanNode", + IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. + ExpectedAttributes: dataMap{ + "cid": nil, + "fieldId": "1", + "spans": []dataMap{ + { + "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", + "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + }, + }, }, }, }, @@ -77,53 +67,42 @@ func TestDefaultExplainCommitsDagScanQueryOp(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) commits query-op with only dockey (no field).", - Request: `query @explain { - commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { - links { - cid - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + commits (dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + links { + cid + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{dagScanPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "dagScanNode", - IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. 
- ExpectedAttributes: dataMap{ - "cid": nil, - "fieldId": nil, - "spans": []dataMap{ - { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df4", + ExpectedPatterns: []dataMap{dagScanPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "dagScanNode", + IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. + ExpectedAttributes: dataMap{ + "cid": nil, + "fieldId": nil, + "spans": []dataMap{ + { + "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df4", + }, + }, }, }, }, @@ -131,54 +110,43 @@ func TestDefaultExplainCommitsDagScanQueryOpWithoutField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainLatestCommitsDagScanQueryOp(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) latestCommits query-op.", - Request: `query @explain { - latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { - cid - links { - cid - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", fieldId: "1") { + cid + links { + cid + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{dagScanPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "dagScanNode", - IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. - ExpectedAttributes: dataMap{ - "cid": nil, - "fieldId": "1", - "spans": []dataMap{ - { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + ExpectedPatterns: []dataMap{dagScanPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "dagScanNode", + IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. 
+ ExpectedAttributes: dataMap{ + "cid": nil, + "fieldId": "1", + "spans": []dataMap{ + { + "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/1", + "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/2", + }, + }, }, }, }, @@ -186,54 +154,43 @@ func TestDefaultExplainLatestCommitsDagScanQueryOp(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) latestCommits query-op with only dockey (no field).", - Request: `query @explain { - latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { - cid - links { - cid - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + latestCommits(dockey: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3") { + cid + links { + cid + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{dagScanPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "dagScanNode", - IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. - ExpectedAttributes: dataMap{ - "cid": nil, - "fieldId": "C", - "spans": []dataMap{ - { - "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/C", - "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/D", + ExpectedPatterns: []dataMap{dagScanPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "dagScanNode", + IncludeChildNodes: true, // Shouldn't have any as this is the last node in the chain. 
+ ExpectedAttributes: dataMap{ + "cid": nil, + "fieldId": "C", + "spans": []dataMap{ + { + "start": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/C", + "end": "/bae-41598f0c-19bc-5da6-813b-e80f14a10df3/D", + }, + }, }, }, }, @@ -241,45 +198,59 @@ func TestDefaultExplainLatestCommitsDagScanQueryOpWithoutField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainLatestCommitsDagScanWithoutDocKey_Failure(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) latestCommits query without DocKey.", - Request: `query @explain { - latestCommits(fieldId: "1") { - cid - links { - cid - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + latestCommits(fieldId: "1") { + cid + links { + cid + } + } + }`, - ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + }, + }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainLatestCommitsDagScanWithoutAnyArguments_Failure(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) latestCommits query without any arguments.", - Request: `query @explain { - latestCommits { - cid - links { - cid - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + testUtils.ExplainRequest{ + + Request: `query @explain { + latestCommits { + cid + links { + cid + } + } + }`, + + ExpectedError: "Field \"latestCommits\" argument \"dockey\" of type \"ID!\" is required but not provided.", + }, + }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/delete_test.go b/tests/integration/explain/default/delete_test.go index dcdf50f8b0..71f454b6e7 100644 --- a/tests/integration/explain/default/delete_test.go +++ b/tests/integration/explain/default/delete_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,57 +30,54 @@ var deletePattern = dataMap{ } func TestDefaultExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using filter.", - Request: `mutation @explain { - delete_Author(filter: {name: {_eq: "Shahzad"}}) { - _key - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "Shahzad", - "age": 26, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{deletePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": dataMap{ - "name": dataMap{ - "_eq": "Shahzad", + Request: `mutation @explain { + delete_Author(filter: {name: {_eq: "Shahzad"}}) { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + 
IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": dataMap{ + "name": dataMap{ + "_eq": "Shahzad", + }, + }, + "ids": []string(nil), }, }, - "ids": []string(nil), - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "name": dataMap{ - "_eq": "Shahzad", - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "Shahzad", + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -87,73 +85,50 @@ func TestDefaultExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithDeleteUsingFilterToMatchEverything(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using filter to match everything.", - Request: `mutation @explain { - delete_Author(filter: {}) { - DeletedKeyByFilter: _key - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "Shahzad", - "age": 26, - "verified": true - }`, - `{ - "name": "Shahzad", - "age": 25, - "verified": true - }`, - `{ - "name": "Shahzad", - "age": 6, - "verified": true - }`, - `{ - "name": "Shahzad", - "age": 1, - "verified": true - }`, - `{ - "name": "Shahzad Lone", - "age": 26, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{deletePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": dataMap{}, - "ids": []string(nil), - }, - }, + Request: `mutation @explain { + delete_Author(filter: {}) { + DeletedKeyByFilter: _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{}, - "spans": []dataMap{ - { - "end": "/4", - "start": "/3", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": nil, + "ids": []string(nil), + }, + }, + + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "end": "/4", + "start": "/3", + }, + }, }, }, }, @@ -161,56 +136,52 @@ func TestDefaultExplainMutationRequestWithDeleteUsingFilterToMatchEverything(t * }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithDeleteUsingId(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using id.", - Request: `mutation @explain { - delete_Author(id: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { - _key - } - }`, - - Docs: map[int][]string{ - 2: { - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{deletePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": nil, - "ids": []string{ - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + Request: `mutation @explain { + delete_Author(id: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": nil, + "ids": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + }, }, }, }, @@ -218,70 +189,60 @@ func TestDefaultExplainMutationRequestWithDeleteUsingId(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithDeleteUsingIds(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using ids.", - Request: `mutation @explain { - delete_Author(ids: [ - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" - ]) { - AliasKey: _key - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{deletePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": nil, - "ids": []string{ + Request: `mutation @explain { + delete_Author(ids: [ "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ]) { + AliasKey: _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": nil, + "ids": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + }, + }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - }, - { - "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", - "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + { + "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + }, + }, }, }, }, @@ -289,150 +250,143 @@ func TestDefaultExplainMutationRequestWithDeleteUsingIds(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithDeleteUsingNoIds(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using no ids.", - Request: `mutation @explain { - delete_Author(ids: []) { - _key - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "Shahzad", - "age": 26, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{deletePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": nil, - "ids": []string{}, - }, - }, + Request: `mutation @explain { + delete_Author(ids: []) { + _key + } + }`, + + ExpectedPatterns: []dataMap{deletePattern}, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{}, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": nil, + "ids": []string{}, + }, + }, + + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{}, + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithDeleteUsingFilterAndIds(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with delete using filter and ids.", - Request: `mutation @explain { - delete_Author( - ids: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", "test"], - filter: { - _and: [ - {age: {_lt: 26}}, - {verified: {_eq: true}}, - ] + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain { + delete_Author( + ids: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", "test"], + filter: { + _and: [ + {age: {_lt: 26}}, + {verified: {_eq: true}}, + ] + } + ) { + _key } - ) { - _key - } - }`, - - Docs: map[int][]string{ - 2: { - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{deletePattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "deleteNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "filter": dataMap{ - "_and": []any{ - dataMap{ - "age": dataMap{ - "_lt": int32(26), + }`, + + ExpectedPatterns: []dataMap{deletePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "deleteNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "filter": dataMap{ + "_and": []any{ + dataMap{ + "age": dataMap{ + "_lt": int32(26), + }, + }, + dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, }, }, - dataMap{ - "verified": dataMap{ - "_eq": true, - }, + "ids": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "test", }, }, }, - "ids": []string{ - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "test", - }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "_and": []any{ - dataMap{ - "age": dataMap{ - "_lt": int32(26), + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "_and": []any{ + dataMap{ + "age": dataMap{ + "_lt": int32(26), + }, + }, + dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, }, }, - dataMap{ - "verified": dataMap{ - "_eq": true, + "spans": []dataMap{ + { + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + { + "end": "/3/tesu", + "start": "/3/test", }, }, }, }, - "spans": []dataMap{ - { - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - }, - { - "end": "/3/tesu", - "start": "/3/test", - }, - }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/delete_with_error_test.go b/tests/integration/explain/default/delete_with_error_test.go index add8af5048..cbe11c2591 100644 --- a/tests/integration/explain/default/delete_with_error_test.go +++ b/tests/integration/explain/default/delete_with_error_test.go @@ -13,37 +13,33 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainMutationRequestWithDeleteHavingNoSubSelection(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) multation request with delete having no sub-selection.", - Request: `mutation @explain { - delete_Author(ids: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f"]) - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain { + delete_Author( + ids: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ] + ) + }`, + + ExpectedError: "Field \"delete_Author\" of type \"[Author]\" must have a sub selection.", }, }, - - ExpectedError: "Field \"delete_Author\" of type \"[Author]\" must have a sub selection.", } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/fixture.go b/tests/integration/explain/default/fixture.go new file mode 100644 index 0000000000..306e3e133f --- /dev/null +++ b/tests/integration/explain/default/fixture.go @@ -0,0 +1,32 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
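The fixture file introduced here gives every explain test in this package a shared `dataMap` alias and reusable plan-shape "patterns", consumed through the action-based `testUtils.TestCase` / `testUtils.ExplainRequest` API that the rest of this diff migrates to. For orientation, a minimal sketch of a migrated test follows; it uses only names that appear elsewhere in this diff (`SchemaForExplainTests`, `basicPattern`, `PlanNodeTargetCase`, `ExecuteTestCase`), while the test name, request, and expected attribute values are illustrative assumptions rather than a test added by this patch.

package test_explain_default

import (
	"testing"

	testUtils "github.com/sourcenetwork/defradb/tests/integration"
	explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain"
)

// Hypothetical example for illustration only; not a test added by this patch.
func TestDefaultExplainSketchOfMigratedPattern(t *testing.T) {
	test := testUtils.TestCase{
		Description: "Explain (default) sketch of the migrated action-based pattern.",

		Actions: []any{
			// Shared schema action defined in tests/integration/explain.
			explainUtils.SchemaForExplainTests,

			testUtils.ExplainRequest{
				Request: `query @explain {
					Author {
						name
					}
				}`,

				// First assert the overall plan shape via the shared pattern below.
				ExpectedPatterns: []dataMap{basicPattern},

				// Then assert only the attributes of the named plan nodes.
				ExpectedTargets: []testUtils.PlanNodeTargetCase{
					{
						TargetNodeName:    "scanNode",
						IncludeChildNodes: true, // leaf of its branch, so no child nodes expected.
						ExpectedAttributes: dataMap{
							"collectionID":   "3",
							"collectionName": "Author",
							"filter":         nil,
							"spans": []dataMap{
								{"start": "/3", "end": "/4"},
							},
						},
					},
				},
			},
		},
	}

	explainUtils.ExecuteTestCase(t, test)
}

The apparent design point of the two-level assertion is that ExpectedPatterns pins the overall plan shape while ExpectedTargets checks only the attributes of the named nodes, so the per-test document fixtures and full result maps removed throughout this diff are no longer needed.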
+ +package test_explain_default + +type dataMap = map[string]any + +var basicPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, +} + +var emptyChildSelectsAttributeForAuthor = dataMap{ + "collectionName": "Author", + "docKeys": nil, + "filter": nil, + "groupBy": nil, + "limit": nil, + "orderBy": nil, +} diff --git a/tests/integration/explain/default/group_test.go b/tests/integration/explain/default/group_test.go index 382d181e24..d631badcde 100644 --- a/tests/integration/explain/default/group_test.go +++ b/tests/integration/explain/default/group_test.go @@ -14,70 +14,50 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainSimpleGroupByOnParent(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a grouping on parent.", - - Request: `query @explain { - Author (groupBy: [age]) { - age - _group { - name - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "age": 65 - }`, +var groupPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, +} - `{ - "name": "Cornelia Funke", - "age": 62 - }`, +func TestDefaultExplainRequestWithGroupByOnParent(t *testing.T) { + test := testUtils.TestCase{ - `{ - "name": "John's Twin", - "age": 65 + Description: "Explain (default) request with group-by on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [age]) { + age + _group { + name + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"age"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, + emptyChildSelectsAttributeForAuthor, }, }, }, @@ -86,71 +66,38 @@ func TestExplainSimpleGroupByOnParent(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByTwoFieldsOnParent(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a grouping by two fields.", - - Request: `query @explain { - Author (groupBy: [age, name]) { - age - _group { - name - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "age": 65 - }`, +func TestDefaultExplainRequestWithGroupByTwoFieldsOnParent(t *testing.T) { + test := testUtils.TestCase{ - `{ - "name": "Cornelia Funke", - "age": 62 - }`, + Description: "Explain (default) request with group-by two fields on parent.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, - `{ - "name": "John's Twin", - "age": 65 + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [age, name]) { + age + _group { + name + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - 
"selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"age", "name"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, + emptyChildSelectsAttributeForAuthor, }, }, }, @@ -159,5 +106,5 @@ func TestExplainGroupByTwoFieldsOnParent(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/group_with_average_test.go b/tests/integration/explain/default/group_with_average_test.go index 84a6a08165..32f935785d 100644 --- a/tests/integration/explain/default/group_with_average_test.go +++ b/tests/integration/explain/default/group_with_average_test.go @@ -14,112 +14,100 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainGroupByWithAverageOnAnInnerField(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a groupBy with average on an field.", - - Request: `query @explain { - Author (groupBy: [name]) { - name - _avg(_group: {field: age}) - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, +var groupAveragePattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, + }, + }, + }, + }, }, }, + }, +} + +func TestDefaultExplainRequestWithGroupByWithAverageOnAnInnerField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with group-by with average on inner field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "averageNode": dataMap{ - "countNode": dataMap{ - "sources": []dataMap{ - { - "fieldName": "_group", - "filter": dataMap{ - "age": dataMap{ - "_ne": nil, - }, + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [name]) { + name + _avg(_group: {field: age}) + } + }`, + + ExpectedPatterns: []dataMap{groupAveragePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "docKeys": nil, + "groupBy": nil, + "limit": nil, + "orderBy": nil, + "filter": dataMap{ + "age": dataMap{ + "_ne": nil, }, }, }, - "sumNode": dataMap{ - "sources": []dataMap{ - { - "childFieldName": "age", - "fieldName": "_group", - "filter": dataMap{ - "age": dataMap{ - "_ne": nil, - }, - }, 
+ }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "_group", + "filter": dataMap{ + "age": dataMap{ + "_ne": nil, }, }, - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - "filter": dataMap{ - "age": dataMap{ - "_ne": nil, - }, - }, - }, - }, - "groupByFields": []string{"name"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, + }, + }, + }, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "age", + "fieldName": "_group", + "filter": dataMap{ + "age": dataMap{ + "_ne": nil, }, }, }, @@ -131,107 +119,76 @@ func TestExplainGroupByWithAverageOnAnInnerField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWithAnAverageInsideTheInnerGroupOnAField(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a groupBy with average of inside the inner group (on a field).", - - Request: `query @explain { - Author (groupBy: [name]) { - name - _avg(_group: {field: _avg}) - _group(groupBy: [verified]) { - verified - _avg(_group: {field: age}) - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 +func TestDefaultExplainRequestWithAverageInsideTheInnerGroupOnAField(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with group-by with average of the inner _group on a field.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "averageNode": dataMap{ - "countNode": dataMap{ - "sources": []dataMap{ - { - "fieldName": "_group", - "filter": nil, - }, + ExpectedPatterns: []dataMap{groupAveragePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "groupBy": []string{"verified", "name"}, + "docKeys": nil, + "limit": nil, + "orderBy": nil, + "filter": nil, }, - "sumNode": dataMap{ - "sources": []dataMap{ - { - "childFieldName": "_avg", - "fieldName": "_group", - "filter": nil, - }, - }, - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "groupBy": []string{"verified", "name"}, - "docKeys": nil, - "filter": nil, - "limit": nil, - "orderBy": nil, - }, - }, - "groupByFields": []string{"name"}, 
- "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, + }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "_group", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "_avg", + "fieldName": "_group", + "filter": nil, }, }, }, @@ -241,110 +198,79 @@ func TestExplainGroupByWithAnAverageInsideTheInnerGroupOnAField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWithAnAverageInsideTheInnerGroupAndNestedGroupBy(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a groupBy with average of inside the inner group with nested groupBy.", +func TestDefaultExplainRequestWithAverageInsideTheInnerGroupOnAFieldAndNestedGroupBy(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author (groupBy: [name]) { - name - _avg(_group: {field: _avg}) - _group(groupBy: [verified]) { - verified - _avg(_group: {field: age}) - _group (groupBy: [age]){ - age + Description: "Explain (default) request with group-by with average of the inner _group on a field and nested group-by.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + _group (groupBy: [age]){ + age + } } - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 + } }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "averageNode": dataMap{ - "countNode": dataMap{ - "sources": []dataMap{ - { - "fieldName": "_group", - "filter": nil, - }, + ExpectedPatterns: []dataMap{groupAveragePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "groupBy": []string{"verified", "name"}, + "docKeys": nil, + "limit": nil, + "orderBy": nil, + "filter": nil, }, - "sumNode": dataMap{ - "sources": []dataMap{ - { - "childFieldName": "_avg", - "fieldName": "_group", - "filter": nil, - }, - }, - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "groupBy": []string{"verified", "name"}, - "docKeys": nil, - "filter": nil, - "limit": nil, - "orderBy": nil, - }, - }, - "groupByFields": []string{"name"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - 
}, - }, - }, + }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "_group", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "_avg", + "fieldName": "_group", + "filter": nil, }, }, }, @@ -354,111 +280,80 @@ func TestExplainGroupByWithAnAverageInsideTheInnerGroupAndNestedGroupBy(t *testi }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWihAnAverageInsideTheInnerGroupAndNestedGroupByWithAnAverage(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a groupBy with average of inside the inner group with nested groupBy with and average.", +func TestDefaultExplainRequestWithAverageInsideTheInnerGroupAndNestedGroupByWithAverage(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author (groupBy: [name]) { - name - _avg(_group: {field: _avg}) - _group(groupBy: [verified]) { - verified - _avg(_group: {field: age}) - _group (groupBy: [age]){ - age - _avg(_group: {field: age}) + Description: "Explain (default) request with average inside the inner _group and nested groupBy with average.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [name]) { + name + _avg(_group: {field: _avg}) + _group(groupBy: [verified]) { + verified + _avg(_group: {field: age}) + _group (groupBy: [age]){ + age + _avg(_group: {field: age}) + } } - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 + } }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "averageNode": dataMap{ - "countNode": dataMap{ - "sources": []dataMap{ - { - "fieldName": "_group", - "filter": nil, - }, + ExpectedPatterns: []dataMap{groupAveragePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "groupBy": []string{"verified", "name"}, + "docKeys": nil, + "limit": nil, + "orderBy": nil, + "filter": nil, }, - "sumNode": dataMap{ - "sources": []dataMap{ - { - "childFieldName": "_avg", - "fieldName": "_group", - "filter": nil, - }, - }, - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "groupBy": []string{"verified", "name"}, - "docKeys": nil, - "filter": nil, - "limit": nil, - "orderBy": nil, - }, - }, - "groupByFields": []string{"name"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, + }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: 
dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "_group", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "_avg", + "fieldName": "_group", + "filter": nil, }, }, }, @@ -468,5 +363,5 @@ func TestExplainGroupByWihAnAverageInsideTheInnerGroupAndNestedGroupByWithAnAver }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/group_with_dockey_child_test.go b/tests/integration/explain/default/group_with_dockey_child_test.go new file mode 100644 index 0000000000..35726cda32 --- /dev/null +++ b/tests/integration/explain/default/group_with_dockey_child_test.go @@ -0,0 +1,82 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithDockeysOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with dockeys on inner _group.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [age] + ) { + age + _group(dockeys: ["bae-6a4c5bc5-b044-5a03-a868-8260af6f2254"]) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"age"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "docKeys": []string{"bae-6a4c5bc5-b044-5a03-a868-8260af6f2254"}, + "filter": nil, + "groupBy": nil, + "limit": nil, + "orderBy": nil, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/group_with_dockey_test.go b/tests/integration/explain/default/group_with_dockey_test.go index e85c92ffe2..31555bc94a 100644 --- a/tests/integration/explain/default/group_with_dockey_test.go +++ b/tests/integration/explain/default/group_with_dockey_test.go @@ -14,74 +14,55 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainQueryWithDockeysFilterOnInnerGroupBy(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with a dockeys filter on inner _group.", +func TestDefaultExplainRequestWithDockeyOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author( - groupBy: [age] - ) { - age - _group(dockeys: ["bae-6a4c5bc5-b044-5a03-a868-8260af6f2254"]) { - name - } - } - }`, + Description: "Explain (default) request with a dockey on parent groupBy.", - Docs: map[int][]string{ - //authors - 2: { - // dockey: "bae-21a6ad4a-1cd8-5613-807c-a90c7c12f880" - `{ - "name": "John Grisham", - "age": 12 - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" - `{ - "name": "Cornelia Funke", - "age": 20 - }`, + testUtils.ExplainRequest{ - // dockey: "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" - `{ - "name": "John's Twin", - "age": 65 + Request: `query @explain { + Author( + groupBy: [age], + dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" + ) { + age + _group { + name + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"age"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": []string{"bae-6a4c5bc5-b044-5a03-a868-8260af6f2254"}, - "filter": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - }, + emptyChildSelectsAttributeForAuthor, }, - "groupByFields": []string{"age"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", + "end": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2255", }, }, }, @@ -91,167 +72,67 @@ func TestExplainQueryWithDockeysFilterOnInnerGroupBy(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainQueryWithDockeyOnParentGroupBy(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with a dockey on parent groupBy.", - - Request: `query @explain { - Author( - groupBy: [age], - dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" - ) { - age - _group { - name - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // dockey: "bae-21a6ad4a-1cd8-5613-807c-a90c7c12f880" - `{ - "name": "John Grisham", - "age": 12 +func TestDefaultExplainRequestWithDockeysAndFilterOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with dockeys and filter on parent groupBy.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [age], + filter: {age: {_eq: 20}}, + dockeys: [ + "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", + "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" + ] + ) { + age + _group { + name + } + } }`, - // dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" - `{ - "name": "Cornelia Funke", - "age": 20 - }`, + ExpectedPatterns: []dataMap{groupPattern}, - // dockey: "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" - `{ - "name": "John's Twin", - "age": 65 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "filter": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - }, - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"age"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", - "end": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2255", - }, - }, - }, + "childSelects": []dataMap{ + emptyChildSelectsAttributeForAuthor, }, }, }, - }, - }, - }, - } - - executeTestCase(t, test) -} - -func TestExplainQuerySimpleWithDockeysAndFilter(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with a dockeys and filter on parent groupBy.", - - Request: `query @explain { - Author( - groupBy: [age], - filter: {age: {_eq: 20}}, - dockeys: [ - "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", - "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" - ] - ) { - age - _group { - name - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - // dockey: "bae-21a6ad4a-1cd8-5613-807c-a90c7c12f880" - `{ - "name": "John Grisham", - "age": 12 - }`, - - // dockey: "bae-6a4c5bc5-b044-5a03-a868-8260af6f2254" - `{ - "name": "Cornelia Funke", - "age": 20 - }`, - - // dockey: "bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed" - `{ - "name": "John's Twin", - "age": 65 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - 
"orderBy": nil, - "filter": nil, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_eq": int32(20), }, }, - "groupByFields": []string{"age"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_eq": int32(20), - }, - }, - "spans": []dataMap{ - { - "start": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", - "end": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2255", - }, - { - "start": "/3/bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed", - "end": "/3/bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeee", - }, - }, + "spans": []dataMap{ + { + "start": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2254", + "end": "/3/bae-6a4c5bc5-b044-5a03-a868-8260af6f2255", + }, + { + "start": "/3/bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeed", + "end": "/3/bae-4ea9d148-13f3-5a48-a0ef-9ffd344caeee", }, }, }, @@ -261,5 +142,5 @@ func TestExplainQuerySimpleWithDockeysAndFilter(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/group_with_filter_child_test.go b/tests/integration/explain/default/group_with_filter_child_test.go new file mode 100644 index 0000000000..bcb53e0e86 --- /dev/null +++ b/tests/integration/explain/default/group_with_filter_child_test.go @@ -0,0 +1,157 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithFilterOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with filter on the inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author (groupBy: [age]) { + age + _group(filter: {age: {_gt: 63}}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"age"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "docKeys": nil, + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(63), + }, + }, + "groupBy": nil, + "limit": nil, + "orderBy": nil, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithFilterOnParentGroupByAndInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with filter on parent groupBy and on the inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author ( + groupBy: [age], + filter: {age: {_gt: 62}} + ) { + age + _group(filter: {age: {_gt: 63}}) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"age"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "docKeys": nil, + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(63), + }, + }, + "groupBy": nil, + "limit": nil, + "orderBy": nil, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(62), + }, + }, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/group_with_filter_test.go b/tests/integration/explain/default/group_with_filter_test.go index 52e13b9f58..6221fd0f02 100644 --- a/tests/integration/explain/default/group_with_filter_test.go +++ b/tests/integration/explain/default/group_with_filter_test.go @@ -14,153 +14,59 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainGroupByWithFilterOnParent(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a grouping with filter on parent.", +func TestDefaultExplainRequestWithFilterOnGroupByParent(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author ( - groupBy: [age], - filter: {age: {_gt: 63}} - ) { - age - _group { - name - } - } - }`, + Description: "Explain (default) request with filter on parent groupBy.", - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "age": 65 - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - `{ - "name": "Cornelia Funke", - "age": 62 - }`, + testUtils.ExplainRequest{ - `{ - "name": "John's Twin", - "age": 65 - }`, - }, - }, + Request: `query @explain { + Author ( + groupBy: [age], + filter: {age: {_gt: 63}} + ) { + age + _group { + name + } + } + }`, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"age"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": 
"Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(63), - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, + emptyChildSelectsAttributeForAuthor, }, }, }, - }, - }, - }, - } - - executeTestCase(t, test) -} - -func TestExplainGroupByWithFilterOnInnerGroupSelection(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain a grouping with filter on the inner group selection.", - - Request: `query @explain { - Author (groupBy: [age]) { - age - _group(filter: {age: {_gt: 63}}) { - name - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "age": 65 - }`, - - `{ - "name": "Cornelia Funke", - "age": 62 - }`, - - `{ - "name": "John's Twin", - "age": 65 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ - "groupByFields": []string{"age"}, - "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "orderBy": nil, - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(63), - }, - }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(63), }, }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", }, }, }, @@ -170,5 +76,5 @@ func TestExplainGroupByWithFilterOnInnerGroupSelection(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/group_with_limit_child_test.go b/tests/integration/explain/default/group_with_limit_child_test.go new file mode 100644 index 0000000000..13d4730638 --- /dev/null +++ b/tests/integration/explain/default/group_with_limit_child_test.go @@ -0,0 +1,132 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithLimitAndOffsetOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with limit and offset on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(groupBy: [name]) { + name + _group(limit: 2, offset: 1) { + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "limit": dataMap{ + "limit": uint64(2), + "offset": uint64(1), + }, + "docKeys": nil, + "filter": nil, + "groupBy": nil, + "orderBy": nil, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithLimitAndOffsetOnMultipleInnerGroupSelections(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with limit and offset on multiple inner _group selections.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(groupBy: [name]) { + name + innerFirstGroup: _group(limit: 1, offset: 2) { + age + } + innerSecondGroup: _group(limit: 2) { + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "limit": dataMap{ + "limit": uint64(1), + "offset": uint64(2), + }, + "docKeys": nil, + "filter": nil, + "groupBy": nil, + "orderBy": nil, + }, + { + "collectionName": "Author", + "limit": dataMap{ + "limit": uint64(2), + "offset": uint64(0), + }, + "docKeys": nil, + "filter": nil, + "groupBy": nil, + "orderBy": nil, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/group_with_limit_test.go b/tests/integration/explain/default/group_with_limit_test.go index c12f02531e..967cda469c 100644 --- a/tests/integration/explain/default/group_with_limit_test.go +++ b/tests/integration/explain/default/group_with_limit_test.go @@ -14,394 +14,126 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainGroupByWithGroupLimitAndOffsetOnParentGroupBy(t *testing.T) { - test := testUtils.RequestTestCase{ - - Description: "Explain query with limit and offset on parent groupBy.", - - Request: `query @explain { - Author( - groupBy: [name], - limit: 1, - offset: 1 - ) { - name - _group { - age - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", 
- "verified": true, - "age": 63 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "limitNode": dataMap{ - "limit": uint64(1), - "offset": uint64(1), - "groupNode": dataMap{ - "groupByFields": []string{"name"}, - "childSelects": []dataMap{ - { - "collectionName": "Author", - "orderBy": nil, - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, - }, +var groupLimitPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, }, }, }, }, - } - - executeTestCase(t, test) + }, } -func TestExplainGroupByWithGroupLimitAndOffsetOnChild(t *testing.T) { - test := testUtils.RequestTestCase{ +func TestDefaultExplainRequestWithLimitAndOffsetOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ - Description: "Explain query with limit and offset on child groupBy.", + Description: "Explain (default) request with limit and offset on parent groupBy.", - Request: `query @explain { - Author(groupBy: [name]) { - name - _group(limit: 2, offset: 1) { - age - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [name], + limit: 1, + offset: 1 + ) { + name + _group { + age + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ - "childSelects": []dataMap{ - { - "collectionName": "Author", - "limit": dataMap{ - "limit": uint64(2), - "offset": uint64(1), - }, - "docKeys": nil, - "filter": nil, - "groupBy": nil, - "orderBy": nil, - }, - }, + ExpectedPatterns: []dataMap{groupLimitPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"name"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, + "childSelects": []dataMap{ + emptyChildSelectsAttributeForAuthor, }, }, }, + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(1), + "offset": uint64(1), + }, + }, }, }, }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWithGroupLimitOnChildMultipleRendered(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with limit on child groupBy (multiple rendered).", +func TestDefaultExplainRequestWithLimitOnParentGroupByAndInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author(groupBy: [name]) { - name - innerFirstGroup: _group(limit: 1, offset: 2) { - age - } - innerSecondGroup: 
_group(limit: 2) { - age - } - } - }`, + Description: "Explain (default) request with limit and offset on parent groupBy and inner _group selection.", - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [name], + limit: 1 + ) { + name + _group(limit: 2) { + age + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupLimitPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "limit": dataMap{ - "limit": uint64(1), - "offset": uint64(2), - }, - "docKeys": nil, - "filter": nil, - "groupBy": nil, - "orderBy": nil, - }, { "collectionName": "Author", "limit": dataMap{ "limit": uint64(2), "offset": uint64(0), }, + "orderBy": nil, "docKeys": nil, - "filter": nil, "groupBy": nil, - "orderBy": nil, - }, - }, - "groupByFields": []string{"name"}, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, + "filter": nil, }, }, }, }, - }, - }, - }, - } - - executeTestCase(t, test) -} - -func TestExplainGroupByWithGroupLimitOnParentAndChild(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with limit on parent and child groupBy.", - - Request: `query @explain { - Author( - groupBy: [name], - limit: 1 - ) { - name - _group(limit: 2) { - age - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "limitNode": dataMap{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "limit": uint64(1), "offset": uint64(0), - "groupNode": dataMap{ - "groupByFields": []string{"name"}, - "childSelects": []dataMap{ - { - "collectionName": "Author", - "limit": dataMap{ - "limit": uint64(2), - "offset": uint64(0), - }, - "orderBy": nil, - "docKeys": nil, - "groupBy": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, }, }, }, @@ -409,5 +141,5 @@ func TestExplainGroupByWithGroupLimitOnParentAndChild(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git 
a/tests/integration/explain/default/group_with_order_child_test.go b/tests/integration/explain/default/group_with_order_child_test.go new file mode 100644 index 0000000000..55d14ef469 --- /dev/null +++ b/tests/integration/explain/default/group_with_order_child_test.go @@ -0,0 +1,180 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithDescendingOrderOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with order (descending) on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(groupBy: [name]) { + name + _group (order: {age: DESC}){ + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "orderBy": []dataMap{ + { + "direction": "DESC", + "fields": []string{"age"}, + }, + }, + "docKeys": nil, + "groupBy": nil, + "limit": nil, + "filter": nil, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithAscendingOrderOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with order (ascending) on inner _group selection.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(groupBy: [name]) { + name + _group (order: {age: ASC}){ + age + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "orderBy": []dataMap{ + { + "direction": "ASC", + "fields": []string{"age"}, + }, + }, + "docKeys": nil, + "groupBy": nil, + "limit": nil, + "filter": nil, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithOrderOnNestedParentGroupByAndOnNestedParentsInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with order on nested parent groupBy and on nested parent's inner _group.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(groupBy: [name]) { + name + _group ( + groupBy: [verified], + order: {verified: ASC} + ){ + verified + _group (order: {age: DESC}) { + age + } + } + } + }`, + + ExpectedPatterns: []dataMap{groupPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + 
"groupByFields": []string{"name"}, + "childSelects": []dataMap{ + { + "collectionName": "Author", + "orderBy": []dataMap{ + { + "direction": "ASC", + "fields": []string{"verified"}, + }, + }, + "groupBy": []string{"verified", "name"}, + "docKeys": nil, + "limit": nil, + "filter": nil, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/group_with_order_test.go b/tests/integration/explain/default/group_with_order_test.go index e5f13aee68..7de88087df 100644 --- a/tests/integration/explain/default/group_with_order_test.go +++ b/tests/integration/explain/default/group_with_order_test.go @@ -14,187 +14,66 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) -func TestExplainGroupByWithOrderOnParentGroup(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with ordered parent groupBy.", - - Request: `query @explain { - Author(groupBy: [name], order: {name: DESC}) { - name - _group { - age - } - } - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - }, - }, - - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "orderNode": dataMap{ - "orderings": []dataMap{ - { - "direction": "DESC", - "fields": []string{"name"}, - }, - }, - "groupNode": dataMap{ - "groupByFields": []string{"name"}, - "childSelects": []dataMap{ - { - "collectionName": "Author", - "docKeys": nil, - "orderBy": nil, - "groupBy": nil, - "limit": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, - }, +var groupOrderPattern = dataMap{ + "explain": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "groupNode": dataMap{ + "selectNode": dataMap{ + "scanNode": dataMap{}, }, }, }, }, - } - - executeTestCase(t, test) + }, } -func TestExplainGroupByWithOrderOnTheChildGroup(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with groupBy string, and child order ascending.", +func TestDefaultExplainRequestWithDescendingOrderOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author(groupBy: [name]) { - name - _group (order: {age: ASC}){ - age - } - } - }`, + Description: "Explain (default) request with order (descending) on parent groupBy.", - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [name], + order: {name: 
DESC} + ) { + name + _group { + age + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupOrderPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"name"}, "childSelects": []dataMap{ - { - "collectionName": "Author", - "orderBy": []dataMap{ - { - "direction": "ASC", - "fields": []string{"age"}, - }, - }, - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "filter": nil, - }, + emptyChildSelectsAttributeForAuthor, }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, + }, + }, + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "DESC", + "fields": []string{"name"}, }, }, }, @@ -204,101 +83,54 @@ func TestExplainGroupByWithOrderOnTheChildGroup(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWithOrderOnTheChildGroupAndOnParentGroup(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with parent groupBy order, and child order.", +func TestDefaultExplainRequestWithAscendingOrderOnParentGroupBy(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author(groupBy: [name], order: {name: DESC}) { - name - _group (order: {age: ASC}){ - age - } - } - }`, + Description: "Explain (default) request with order (ascending) on parent groupBy.", - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [name], + order: {name: ASC} + ) { + name + _group { + age + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "orderNode": dataMap{ + ExpectedPatterns: []dataMap{groupOrderPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "groupByFields": []string{"name"}, + "childSelects": []dataMap{ + emptyChildSelectsAttributeForAuthor, + }, + }, + }, + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "orderings": []dataMap{ { - "direction": "DESC", + "direction": "ASC", "fields": []string{"name"}, }, }, - "groupNode": dataMap{ - "groupByFields": []string{"name"}, - "childSelects": []dataMap{ - { - "collectionName": "Author", - "orderBy": []dataMap{ - { - "direction": "ASC", - "fields": []string{"age"}, - }, - }, - "docKeys": nil, - "groupBy": nil, - "limit": nil, - "filter": nil, - }, - }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - }, }, }, }, @@ -306,69 +138,38 
@@ func TestExplainGroupByWithOrderOnTheChildGroupAndOnParentGroup(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } -func TestExplainGroupByWithOrderOnTheNestedChildOfChildGroup(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain query with parent groupBy order, and child order.", +func TestDefaultExplainRequestWithOrderOnParentGroupByAndOnInnerGroupSelection(t *testing.T) { + test := testUtils.TestCase{ - Request: `query @explain { - Author(groupBy: [name]) { - name - _group ( - groupBy: [verified], - order: {verified: ASC} - ){ - verified - _group (order: {age: DESC}) { - age - } - } - } - }`, + Description: "Explain (default) request with order on parent groupBy and inner _group selection.", - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John Grisham", - "verified": true, - "age": 65 - }`, - `{ - "name": "John Grisham", - "verified": false, - "age": 2 - }`, - `{ - "name": "John Grisham", - "verified": true, - "age": 50 - }`, - `{ - "name": "Cornelia Funke", - "verified": true, - "age": 62 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 - }`, - `{ - "name": "Twin", - "verified": true, - "age": 63 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + groupBy: [name], + order: {name: DESC} + ) { + name + _group (order: {age: ASC}){ + age + } + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "groupNode": dataMap{ + ExpectedPatterns: []dataMap{groupOrderPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "groupNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ "groupByFields": []string{"name"}, "childSelects": []dataMap{ { @@ -376,27 +177,25 @@ func TestExplainGroupByWithOrderOnTheNestedChildOfChildGroup(t *testing.T) { "orderBy": []dataMap{ { "direction": "ASC", - "fields": []string{"verified"}, + "fields": []string{"age"}, }, }, - "groupBy": []string{"verified", "name"}, "docKeys": nil, + "groupBy": nil, "limit": nil, "filter": nil, }, }, - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, + }, + }, + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "DESC", + "fields": []string{"name"}, }, }, }, @@ -406,5 +205,5 @@ func TestExplainGroupByWithOrderOnTheNestedChildOfChildGroup(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/invalid_type_arg_test.go b/tests/integration/explain/default/invalid_type_arg_test.go index fb35b79005..391d56492a 100644 --- a/tests/integration/explain/default/invalid_type_arg_test.go +++ b/tests/integration/explain/default/invalid_type_arg_test.go @@ -13,32 +13,31 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestInvalidExplainRequestTypeReturnsError(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Invalid type of explain request should error.", - Request: `query @explain(type: invalid) { - Author { - _key - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": 
"John", - "age": 21 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain(type: invalid) { + Author { + _key + name + age + } }`, + + ExpectedError: "Argument \"type\" has invalid value invalid.\nExpected type \"ExplainType\", found invalid.", }, }, - - ExpectedError: "Argument \"type\" has invalid value invalid.\nExpected type \"ExplainType\", found invalid.", } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/top_with_average_test.go b/tests/integration/explain/default/top_with_average_test.go index 6c1127c5b5..249aa209e9 100644 --- a/tests/integration/explain/default/top_with_average_test.go +++ b/tests/integration/explain/default/top_with_average_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -40,204 +41,181 @@ var topLevelAveragePattern = dataMap{ } func TestDefaultExplainTopLevelAverageRequest(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level average request with filter.", - Request: `query @explain { - _avg( - Author: { - field: age - } - ) - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John", - "verified": false, - "age": 28 - }`, - `{ - "name": "Bob", - "verified": true, - "age": 30 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + _avg( + Author: { + field: age + } + ) }`, - }, - }, - ExpectedPatterns: []dataMap{topLevelAveragePattern}, + ExpectedPatterns: []dataMap{topLevelAveragePattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_ne": nil, - }, - }, - "spans": []dataMap{ - { - "end": "/4", - "start": "/3", - }, - }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "childFieldName": "age", - "fieldName": "Author", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", "filter": dataMap{ "age": dataMap{ "_ne": nil, }, }, + "spans": []dataMap{ + { + "end": "/4", + "start": "/3", + }, + }, }, }, - }, - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_ne": nil, + { + TargetNodeName: "sumNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "age", + "fieldName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_ne": nil, + }, + }, }, }, }, }, + { + TargetNodeName: "countNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_ne": nil, + }, + }, + }, + }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{}, // no attributes + }, }, }, - { - TargetNodeName: "averageNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{}, // no attributes - }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainTopLevelAverageRequestWithFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level average request with filter.", - Request: `query @explain { - _avg( - Author: { - field: age, - filter: { - age: { - _gt: 26 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + _avg( + Author: { + field: age, + filter: { + age: { + _gt: 26 + } + } } - } - } - ) - }`, - - Docs: map[int][]string{ - //authors - 2: { - `{ - "name": "John", - "verified": false, - "age": 21 - }`, - `{ - "name": "Bob", - "verified": false, - "age": 30 + ) }`, - `{ - "name": "Alice", - "verified": false, - "age": 32 - }`, - }, - }, - ExpectedPatterns: []dataMap{topLevelAveragePattern}, + ExpectedPatterns: []dataMap{topLevelAveragePattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(26), - "_ne": nil, - }, - }, - "spans": []dataMap{ - { - "end": "/4", - "start": "/3", - }, - }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "childFieldName": "age", - "fieldName": "Author", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", "filter": dataMap{ "age": dataMap{ "_gt": int32(26), "_ne": nil, }, }, + "spans": []dataMap{ + { + "end": "/4", + "start": "/3", + }, + }, }, }, - }, - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(26), - "_ne": nil, + { + TargetNodeName: "sumNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "age", + "fieldName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(26), + "_ne": nil, + }, + }, }, }, }, }, + { + TargetNodeName: "countNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(26), + "_ne": nil, + }, + }, + }, + }, + }, + }, + { + TargetNodeName: "averageNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{}, // no attributes + }, }, }, - { - TargetNodeName: "averageNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{}, // no attributes - }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/top_with_count_test.go b/tests/integration/explain/default/top_with_count_test.go index 94fb0e7de1..8129013911 100644 --- a/tests/integration/explain/default/top_with_count_test.go +++ b/tests/integration/explain/default/top_with_count_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -34,56 +35,47 @@ var topLevelCountPattern = dataMap{ } func TestDefaultExplainTopLevelCountRequest(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level count request.", - Request: `query @explain { - _count(Author: {}) - }`, - - Docs: map[int][]string{ - //Authors - 2: { - `{ - "name": "John", - "verified": true, - "age": 21 - }`, - `{ - "name": "Bob", - "verified": false, - "age": 30 - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{topLevelCountPattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + Request: `query @explain { + _count(Author: {}) + }`, + + ExpectedPatterns: []dataMap{topLevelCountPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - }, - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", - "filter": nil, + { + TargetNodeName: "countNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "filter": nil, + }, + }, }, }, }, @@ -91,81 +83,67 @@ func TestDefaultExplainTopLevelCountRequest(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainTopLevelCountRequestWithFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level count request with filter.", - Request: `query @explain { - _count( - Author: { - filter: { - age: { - _gt: 26 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + _count( + Author: { + filter: { + age: { + _gt: 26 + } + } } - } - } - ) - }`, - - Docs: map[int][]string{ - //Authors - 2: { - `{ - "name": "John", - "verified": false, - "age": 21 - }`, - `{ - "name": "Bob", - "verified": false, - "age": 30 + ) }`, - `{ - "name": "Alice", - "verified": true, - "age": 32 - }`, - }, - }, - ExpectedPatterns: []dataMap{topLevelCountPattern}, + ExpectedPatterns: []dataMap{topLevelCountPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(26), - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", "filter": dataMap{ "age": dataMap{ "_gt": int32(26), }, }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(26), + }, + }, + }, + }, }, }, }, @@ -173,5 +151,5 @@ func TestDefaultExplainTopLevelCountRequestWithFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/top_with_sum_test.go b/tests/integration/explain/default/top_with_sum_test.go index 8e4ac14891..764663a8bd 100644 --- a/tests/integration/explain/default/top_with_sum_test.go +++ b/tests/integration/explain/default/top_with_sum_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -34,61 +35,52 @@ var topLevelSumPattern = dataMap{ } func TestDefaultExplainTopLevelSumRequest(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level sum request.", - Request: `query @explain { - _sum( - Author: { - field: age - } - ) - }`, - - Docs: map[int][]string{ - //Authors - 2: { - `{ - "name": "John", - "verified": true, - "age": 21 - }`, - `{ - "name": "Bob", - "verified": true, - "age": 30 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + _sum( + Author: { + field: age + } + ) }`, - }, - }, - ExpectedPatterns: []dataMap{topLevelSumPattern}, + ExpectedPatterns: []dataMap{topLevelSumPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", - "childFieldName": "age", - "filter": nil, + { + TargetNodeName: "sumNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "childFieldName": "age", + "filter": nil, + }, + }, }, }, }, @@ -96,83 +88,69 @@ func TestDefaultExplainTopLevelSumRequest(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainTopLevelSumRequestWithFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) top-level sum request with filter.", - Request: `query @explain { - _sum( - Author: { - field: age, - filter: { - age: { - _gt: 26 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + _sum( + Author: { + field: age, + filter: { + age: { + _gt: 26 + } + } } - } - } - ) - }`, - - Docs: map[int][]string{ - //Authors - 2: { - `{ - "name": "John", - "verified": false, - "age": 21 + ) }`, - `{ - "name": "Bob", - "verified": false, - "age": 30 - }`, - `{ - "name": "Alice", - "verified": true, - "age": 32 - }`, - }, - }, - ExpectedPatterns: []dataMap{topLevelSumPattern}, + ExpectedPatterns: []dataMap{topLevelSumPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(26), - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "Author", - "childFieldName": "age", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", "filter": dataMap{ "age": dataMap{ "_gt": int32(26), }, }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "Author", + "childFieldName": "age", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(26), + }, + }, + }, + }, }, }, }, @@ -180,5 +158,5 @@ func TestDefaultExplainTopLevelSumRequestWithFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/type_join_many_test.go b/tests/integration/explain/default/type_join_many_test.go index 6ffdbcba09..9fa66bf76a 100644 --- a/tests/integration/explain/default/type_join_many_test.go +++ b/tests/integration/explain/default/type_join_many_test.go @@ -13,165 +13,89 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainRequestWithAOneToManyJoin(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with a 1-to-M join.", - Request: `query @explain { - Author { - articles { - name - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - // contact - 3: { - // _key: bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed - // "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - `{ - "cell": "5197212301", - "email": "john_grisham@example.com", - "address_id": "bae-c8448e47-6cd1-571f-90bd-364acb80da7b" - }`, - - // _key: bae-c0960a29-b704-5c37-9c2e-59e1249e4559 - // "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - `{ - "cell": "5197212302", - "email": "cornelia_funke@example.com", - "address_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, + testUtils.ExplainRequest{ - // address - 4: { - // _key: bae-c8448e47-6cd1-571f-90bd-364acb80da7b - // "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - `{ - "city": "Waterloo", - "country": "Canada" + Request: `query @explain { + Author { + articles { + name + } + } }`, - // _key: bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692 - // "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - `{ - "city": "Brampton", - "country": "Canada" - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": 
normalTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - // Note: `root` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "root", - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", }, }, - }, - }, - { - // Note: `subType` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "subType", - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, + { + // Note: `root` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "root", + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ "scanNode": dataMap{ "filter": nil, - "collectionID": "1", - "collectionName": "Article", + "collectionID": "3", + "collectionName": "Author", "spans": []dataMap{ { - "start": "/1", - "end": "/2", + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + { + // Note: `subType` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "subType", + IncludeChildNodes: true, // We care about checking children nodes. 
+ ExpectedAttributes: dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "1", + "collectionName": "Article", + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, @@ -182,5 +106,5 @@ func TestDefaultExplainRequestWithAOneToManyJoin(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/type_join_one_test.go b/tests/integration/explain/default/type_join_one_test.go index da6a89b53d..472a6f2164 100644 --- a/tests/integration/explain/default/type_join_one_test.go +++ b/tests/integration/explain/default/type_join_one_test.go @@ -13,166 +13,90 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainRequestWithAOneToOneJoin(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with a 1-to-1 join.", - Request: `query @explain { - Author { - OnlyEmail: contact { - email - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - // contact - 3: { - // _key: bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed - // "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - `{ - "cell": "5197212301", - "email": "john_grisham@example.com", - "address_id": "bae-c8448e47-6cd1-571f-90bd-364acb80da7b" - }`, - - // _key: bae-c0960a29-b704-5c37-9c2e-59e1249e4559 - // "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - `{ - "cell": "5197212302", - "email": "cornelia_funke@example.com", - "address_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, + testUtils.ExplainRequest{ - // address - 4: { - // _key: bae-c8448e47-6cd1-571f-90bd-364acb80da7b - // "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - `{ - "city": "Waterloo", - "country": "Canada" + Request: `query @explain { + Author { + OnlyEmail: contact { + email + } + } }`, - // _key: bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692 - // "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - `{ - "city": "Brampton", - "country": "Canada" - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: 
[]dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "direction": "primary", - "joinType": "typeJoinOne", - "rootName": "author", - "subTypeName": "contact", - }, - }, - { - // Note: `root` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "root", - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "direction": "primary", + "joinType": "typeJoinOne", + "rootName": "author", + "subTypeName": "contact", }, }, - }, - }, - { - // Note: `subType` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "subType", - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, + { + // Note: `root` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "root", + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ "scanNode": dataMap{ "filter": nil, - "collectionID": "4", - "collectionName": "AuthorContact", + "collectionID": "3", + "collectionName": "Author", "spans": []dataMap{ { - "start": "/4", - "end": "/5", + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + { + // Note: `subType` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "subType", + IncludeChildNodes: true, // We care about checking children nodes. 
+ ExpectedAttributes: dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "4", + "collectionName": "AuthorContact", + "spans": []dataMap{ + { + "start": "/4", + "end": "/5", + }, + }, }, }, }, @@ -183,124 +107,46 @@ func TestDefaultExplainRequestWithAOneToOneJoin(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithTwoLevelDeepNestedJoins(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with two level deep nested joins.", - Request: `query @explain { - Author { - name - contact { - email - address { - city - } - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - // contact - 3: { - // _key: bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed - // "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - `{ - "cell": "5197212301", - "email": "john_grisham@example.com", - "address_id": "bae-c8448e47-6cd1-571f-90bd-364acb80da7b" - }`, - - // _key: bae-c0960a29-b704-5c37-9c2e-59e1249e4559 - // "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - `{ - "cell": "5197212302", - "email": "cornelia_funke@example.com", - "address_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // address - 4: { - // _key: bae-c8448e47-6cd1-571f-90bd-364acb80da7b - // "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - `{ - "city": "Waterloo", - "country": "Canada" - }`, + testUtils.ExplainRequest{ - // _key: bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692 - // "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - `{ - "city": "Brampton", - "country": "Canada" + Request: `query @explain { + Author { + name + contact { + email + address { + city + } + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": dataMap{ - "root": dataMap{ - "scanNode": dataMap{}, - }, - "subType": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": dataMap{ + "root": dataMap{ + "scanNode": dataMap{}, + }, + "subType": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": normalTypeJoinPattern, 
+ }, + }, }, }, }, @@ -308,88 +154,89 @@ func TestDefaultExplainRequestWithTwoLevelDeepNestedJoins(t *testing.T) { }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "direction": "primary", - "joinType": "typeJoinOne", - "rootName": "author", - "subTypeName": "contact", - }, - }, - { - TargetNodeName: "root", - OccurancesToSkip: 0, - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "direction": "primary", + "joinType": "typeJoinOne", + "rootName": "author", + "subTypeName": "contact", + }, + }, + { + TargetNodeName: "root", + OccurancesToSkip: 0, + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ + "scanNode": dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, - }, - }, - // Note: the 1st `subType` will contain the entire rest of the graph so we target - // and select only the nodes we care about inside it and not `subType` itself. - - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "direction": "primary", - "joinType": "typeJoinOne", - "rootName": "contact", - "subTypeName": "address", - }, - }, - { - TargetNodeName: "root", - OccurancesToSkip: 1, - IncludeChildNodes: true, - ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "4", - "collectionName": "AuthorContact", - "spans": []dataMap{ - { - "start": "/4", - "end": "/5", - }, + // Note: the 1st `subType` will contain the entire rest of the graph so we target + // and select only the nodes we care about inside it and not `subType` itself. + + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "direction": "primary", + "joinType": "typeJoinOne", + "rootName": "contact", + "subTypeName": "address", }, }, - }, - }, - { - TargetNodeName: "subType", // The last subType (assert everything under it). - OccurancesToSkip: 1, - IncludeChildNodes: true, - ExpectedAttributes: dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, + { + TargetNodeName: "root", + OccurancesToSkip: 1, + IncludeChildNodes: true, + ExpectedAttributes: dataMap{ "scanNode": dataMap{ "filter": nil, - "collectionID": "5", - "collectionName": "ContactAddress", + "collectionID": "4", + "collectionName": "AuthorContact", "spans": []dataMap{ { - "start": "/5", - "end": "/6", + "start": "/4", + "end": "/5", + }, + }, + }, + }, + }, + { + TargetNodeName: "subType", // The last subType (assert everything under it). 
+ OccurancesToSkip: 1, + IncludeChildNodes: true, + ExpectedAttributes: dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "5", + "collectionName": "ContactAddress", + "spans": []dataMap{ + { + "start": "/5", + "end": "/6", + }, + }, }, }, }, @@ -400,5 +247,5 @@ func TestDefaultExplainRequestWithTwoLevelDeepNestedJoins(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/type_join_test.go b/tests/integration/explain/default/type_join_test.go index b538047fb2..c3ca250565 100644 --- a/tests/integration/explain/default/type_join_test.go +++ b/tests/integration/explain/default/type_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -30,293 +31,217 @@ var normalTypeJoinPattern = dataMap{ } func TestDefaultExplainRequestWith2SingleJoinsAnd1ManyJoin(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with 2 single joins and 1 many join.", - Request: `query @explain { - Author { - OnlyEmail: contact { - email - } - articles { - name - } - contact { - cell - email - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - // contact - 3: { - // _key: bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed - // "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - `{ - "cell": "5197212301", - "email": "john_grisham@example.com", - "address_id": "bae-c8448e47-6cd1-571f-90bd-364acb80da7b" - }`, - - // _key: bae-c0960a29-b704-5c37-9c2e-59e1249e4559 - // "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - `{ - "cell": "5197212302", - "email": "cornelia_funke@example.com", - "address_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - - // address - 4: { - // _key: bae-c8448e47-6cd1-571f-90bd-364acb80da7b - // "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - `{ - "city": "Waterloo", - "country": "Canada" - }`, - - // _key: bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692 - // "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - `{ - "city": "Brampton", - "country": "Canada" + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author { + OnlyEmail: contact { + email + } 
+ articles { + name + } + contact { + cell + email + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "parallelNode": []dataMap{ - { - "typeIndexJoin": normalTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - // 1st join's assertions. - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "direction": "primary", - "joinType": "typeJoinOne", - "rootName": "author", - "subTypeName": "contact", - }, - }, - { - // Note: `root` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "root", - OccurancesToSkip: 0, - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + // 1st join's assertions. + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "direction": "primary", + "joinType": "typeJoinOne", + "rootName": "author", + "subTypeName": "contact", }, }, - }, - }, - { - // Note: `subType` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "subType", - OccurancesToSkip: 0, - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, + { + // Note: `root` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "root", + OccurancesToSkip: 0, + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ "scanNode": dataMap{ "filter": nil, - "collectionID": "4", - "collectionName": "AuthorContact", + "collectionID": "3", + "collectionName": "Author", "spans": []dataMap{ { - "start": "/4", - "end": "/5", + "start": "/3", + "end": "/4", }, }, }, }, }, - }, - }, - - // 2nd join's assertions (the one to many join). - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - // Note: `root` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "root", - OccurancesToSkip: 1, - IncludeChildNodes: true, // We care about checking children nodes. 
- ExpectedAttributes: dataMap{ - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + // Note: `subType` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "subType", + OccurancesToSkip: 0, + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "4", + "collectionName": "AuthorContact", + "spans": []dataMap{ + { + "start": "/4", + "end": "/5", + }, + }, + }, + }, }, }, }, - }, - }, - { - // Note: `subType` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "subType", - OccurancesToSkip: 1, - IncludeChildNodes: true, // We care about checking children nodes. - ExpectedAttributes: dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, + + // 2nd join's assertions (the one to many join). + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", + }, + }, + { + // Note: `root` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "root", + OccurancesToSkip: 1, + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ "scanNode": dataMap{ "filter": nil, - "collectionID": "1", - "collectionName": "Article", + "collectionID": "3", + "collectionName": "Author", "spans": []dataMap{ { - "start": "/1", - "end": "/2", + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + { + // Note: `subType` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "subType", + OccurancesToSkip: 1, + IncludeChildNodes: true, // We care about checking children nodes. + ExpectedAttributes: dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "1", + "collectionName": "Article", + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, }, }, - }, - }, - // 3rd join's assertions (should be same as 1st one, so after `typeIndexJoin` lets just - // assert that the `scanNode`s are valid only. - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 2, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "direction": "primary", - "joinType": "typeJoinOne", - "rootName": "author", - "subTypeName": "contact", - }, - }, - { - // Note: `root` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "scanNode", - OccurancesToSkip: 4, // As we encountered 2 `scanNode`s per join. - IncludeChildNodes: true, // Shouldn't have any. - ExpectedAttributes: dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + // 3rd join's assertions (should be same as 1st one, so after `typeIndexJoin` lets just + // assert that the `scanNode`s are valid only. 
+ { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 2, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "direction": "primary", + "joinType": "typeJoinOne", + "rootName": "author", + "subTypeName": "contact", }, }, - }, - }, - { - // Note: `subType` is not a node but is a special case because for typeIndexJoin we - // restructure to show both `root` and `subType` at the same level. - TargetNodeName: "scanNode", - OccurancesToSkip: 5, // As we encountered 2 `scanNode`s per join + 1 in the `root` above. - IncludeChildNodes: true, // Shouldn't have any. - ExpectedAttributes: dataMap{ - "filter": nil, - "collectionID": "4", - "collectionName": "AuthorContact", - "spans": []dataMap{ - { - "start": "/4", - "end": "/5", + { + // Note: `root` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "scanNode", + OccurancesToSkip: 4, // As we encountered 2 `scanNode`s per join. + IncludeChildNodes: true, // Shouldn't have any. + ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + // Note: `subType` is not a node but is a special case because for typeIndexJoin we + // restructure to show both `root` and `subType` at the same level. + TargetNodeName: "scanNode", + OccurancesToSkip: 5, // As we encountered 2 `scanNode`s per join + 1 in the `root` above. + IncludeChildNodes: true, // Shouldn't have any. + ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "4", + "collectionName": "AuthorContact", + "spans": []dataMap{ + { + "start": "/4", + "end": "/5", + }, + }, }, }, }, @@ -324,5 +249,5 @@ func TestDefaultExplainRequestWith2SingleJoinsAnd1ManyJoin(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/type_join_with_filter_and_key_test.go b/tests/integration/explain/default/type_join_with_filter_and_key_test.go new file mode 100644 index 0000000000..2290de03e6 --- /dev/null +++ b/tests/integration/explain/default/type_join_with_filter_and_key_test.go @@ -0,0 +1,195 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithRelatedAndRegularFilterAndKeys(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with related and regular filter + keys.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + filter: { + name: {_eq: "John Grisham"}, + books: {name: {_eq: "Painted House"}} + }, + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f8e" + ] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": normalTypeJoinPattern, + }, + }, + }, + }, + }, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f8e", + }, + "filter": dataMap{ + "books": dataMap{ + "name": dataMap{ + "_eq": "Painted House", + }, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "John Grisham", + }, + }, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f8e", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f8f", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithManyRelatedFiltersAndKey(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with many related filters + key.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + filter: { + name: {_eq: "Cornelia Funke"}, + articles: {name: {_eq: "To my dear readers"}}, + books: {name: {_eq: "Theif Lord"}} + }, + dockeys: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"] + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + "filter": dataMap{ + "articles": dataMap{ + "name": dataMap{ + "_eq": "To my dear readers", + }, + }, + "books": dataMap{ + "name": dataMap{ + "_eq": "Theif Lord", + }, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "Cornelia Funke", + }, + }, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/type_join_with_filter_test.go b/tests/integration/explain/default/type_join_with_filter_test.go new file mode 100644 index 0000000000..799ad2677d --- /dev/null +++ b/tests/integration/explain/default/type_join_with_filter_test.go @@ -0,0 +1,181 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain_default + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestDefaultExplainRequestWithRelatedAndRegularFilter(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with related and regular filter.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + filter: { + name: {_eq: "John Grisham"}, + books: {name: {_eq: "Painted House"}} + } + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": normalTypeJoinPattern, + }, + }, + }, + }, + }, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": nil, + "filter": dataMap{ + "books": dataMap{ + "name": dataMap{ + "_eq": "Painted House", + }, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "John Grisham", + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} + +func TestDefaultExplainRequestWithManyRelatedFilters(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Explain (default) request with many related filters.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + filter: { + name: {_eq: "Cornelia Funke"}, + articles: {name: {_eq: "To my dear readers"}}, + books: {name: {_eq: "Theif Lord"}} + } + ) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, + }, + }, + }, + }, + }, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": nil, + "filter": dataMap{ + "articles": dataMap{ + "name": dataMap{ + "_eq": "To my dear readers", + }, + }, + "books": dataMap{ + "name": dataMap{ + "_eq": "Theif Lord", + }, + }, + }, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "Cornelia Funke", + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/default/update_test.go b/tests/integration/explain/default/update_test.go index 63c8f025ff..0b5ee28920 100644 --- a/tests/integration/explain/default/update_test.go +++ b/tests/integration/explain/default/update_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,75 +30,65 @@ var updatePattern = dataMap{ } func TestDefaultExplainMutationRequestWithUpdateUsingBooleanFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with update using boolean filter.", - Request: `mutation @explain { - update_Author( - filter: { - verified: { - _eq: true - } - }, - data: "{\"age\": 59}" - ) { - _key - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{updatePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "updateNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(59), - }, - "filter": dataMap{ - "verified": dataMap{ - "_eq": true, + Request: `mutation @explain { + update_Author( + filter: { + verified: { + _eq: true + } }, - }, - "ids": []string(nil), - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // 
should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "verified": dataMap{ - "_eq": true, + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "updateNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(59), + }, + "filter": dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, + "ids": []string(nil), }, }, - "spans": []dataMap{ - { - "end": "/4", - "start": "/3", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, + "spans": []dataMap{ + { + "end": "/4", + "start": "/3", + }, + }, }, }, }, @@ -105,77 +96,67 @@ func TestDefaultExplainMutationRequestWithUpdateUsingBooleanFilter(t *testing.T) }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithUpdateUsingIds(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with update using ids.", - Request: `mutation @explain { - update_Author( - ids: [ - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - ], - data: "{\"age\": 59}" - ) { - _key - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `mutation @explain { + update_Author( + ids: [ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ], + data: "{\"age\": 59}" + ) { + _key + name + age + } }`, - }, - }, - - ExpectedPatterns: []dataMap{updatePattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "updateNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(59), - }, - "filter": nil, - "ids": []string{ - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", - "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + ExpectedPatterns: []dataMap{updatePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "updateNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(59), + }, + "filter": nil, + "ids": []string{ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, }, - { - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + }, + { + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + }, }, }, }, @@ -183,69 +164,59 @@ func TestDefaultExplainMutationRequestWithUpdateUsingIds(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithUpdateUsingId(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with update using id.", - Request: `mutation @explain { - update_Author( - id: "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - data: "{\"age\": 59}" - ) { - _key - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{updatePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "updateNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(59), - }, - "filter": nil, - "ids": []string{ - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + Request: `mutation @explain { + update_Author( + id: "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "updateNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(59), + }, + "filter": nil, + "ids": []string{ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + }, + }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + }, + }, }, }, }, @@ -253,90 +224,80 @@ func TestDefaultExplainMutationRequestWithUpdateUsingId(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainMutationRequestWithUpdateUsingIdsAndFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) mutation request with update using both ids and filter.", - Request: `mutation @explain { - update_Author( - filter: { - verified: { - _eq: true - } - }, - ids: [ - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - ], - data: "{\"age\": 59}" - ) { - _key - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{updatePattern}, + testUtils.ExplainRequest{ - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "updateNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "data": dataMap{ - "age": float64(59), - }, - "filter": dataMap{ - "verified": dataMap{ - "_eq": true, + Request: `mutation @explain { + update_Author( + filter: { + verified: { + _eq: true + } }, - }, - "ids": []string{ - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "verified": dataMap{ - "_eq": true, + ids: [ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ], + data: "{\"age\": 59}" + ) { + _key + name + age + } + }`, + + ExpectedPatterns: []dataMap{updatePattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "updateNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "data": dataMap{ + "age": float64(59), + }, + "filter": dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, + "ids": []string{ + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, }, }, - "spans": []dataMap{ - { - "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", - }, - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "verified": dataMap{ + "_eq": true, + }, + }, + "spans": []dataMap{ + { + "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + }, + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + }, }, }, }, @@ -344,5 +305,5 @@ func TestDefaultExplainMutationRequestWithUpdateUsingIdsAndFilter(t *testing.T) }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/utils.go b/tests/integration/explain/default/utils.go deleted file mode 100644 index a16264a1ac..0000000000 --- a/tests/integration/explain/default/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package test_explain_default - -import ( - "testing" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" - explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" -) - -type dataMap = map[string]any - -var bookAuthorGQLSchema = (` - type Article { - name: String - author: Author - pages: Int - } - - type Book { - name: String - author: Author - pages: Int - chapterPages: [Int!] - } - - type Author { - name: String - age: Int - verified: Boolean - books: [Book] - articles: [Article] - contact: AuthorContact - } - - type AuthorContact { - cell: String - email: String - author: Author - address: ContactAddress - } - - type ContactAddress { - city: String - country: String - contact: AuthorContact - } - -`) - -// TODO: This should be resolved in ISSUE#953 (github.com/sourcenetwork/defradb). 
-func executeTestCase(t *testing.T, test testUtils.RequestTestCase) { - testUtils.ExecuteRequestTestCase( - t, - bookAuthorGQLSchema, - []string{"Article", "Book", "Author", "AuthorContact", "ContactAddress"}, - test, - ) -} - -func runExplainTest(t *testing.T, test explainUtils.ExplainRequestTestCase) { - explainUtils.ExecuteExplainRequestTestCase( - t, - bookAuthorGQLSchema, - []string{"Article", "Book", "Author", "AuthorContact", "ContactAddress"}, - test, - ) -} - -var basicPattern = dataMap{ - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "scanNode": dataMap{}, - }, - }, - }, -} diff --git a/tests/integration/explain/default/with_average_join_test.go b/tests/integration/explain/default/with_average_join_test.go index 19b7c7f9cf..a48a1b97d2 100644 --- a/tests/integration/explain/default/with_average_join_test.go +++ b/tests/integration/explain/default/with_average_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -33,143 +34,106 @@ var averageTypeIndexJoinPattern = dataMap{ } func TestDefaultExplainRequestWithAverageOnJoinedField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with average on joined/related field.", - Request: `query @explain { - Author { - name - _avg(books: {field: pages}) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 22 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 178 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321 - }`, - `{ - "name": "Incomplete book", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 79 - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" + Request: `query @explain { + Author { + name + _avg(books: {field: pages}) + } }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - }, - ExpectedPatterns: []dataMap{averageTypeIndexJoinPattern}, + ExpectedPatterns: []dataMap{averageTypeIndexJoinPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "averageNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{}, // no attributes - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "books", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "books", + "filter": dataMap{ + "pages": dataMap{ + "_ne": nil, + }, + }, }, }, }, }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: 
dataMap{ - "sources": []dataMap{ - { - "childFieldName": "pages", - "fieldName": "books", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "pages", + "fieldName": "books", + "filter": dataMap{ + "pages": dataMap{ + "_ne": nil, + }, + }, }, }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of root type - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, + { + TargetNodeName: "scanNode", // inside of root type + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", // inside of subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": dataMap{ + "pages": dataMap{ + "_ne": nil, + }, + }, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -177,105 +141,45 @@ func TestDefaultExplainRequestWithAverageOnJoinedField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithAverageOnMultipleJoinedFieldsWithFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with average on multiple joined fields with filter.", - Request: `query @explain { - Author { - name - _avg( - books: {field: pages}, - articles: {field: pages, filter: {pages: {_gt: 3}}} - ) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 2 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 11 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 31 - }`, - }, - - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 22, - "chapterPages": [1, 20] - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 178, - "chapterPages": [1, 11, 30, 50, 80, 120, 150] - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321, - "chapterPages": [22, 211, 310] - }`, - `{ - "name": "Incomplete book", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 79, - "chapterPages": [1, 22, 33, 44, 55, 66] - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" + Request: `query @explain { + Author { + name + _avg( + books: {field: pages}, + articles: {field: pages, filter: {pages: {_gt: 3}}} + ) + } }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "averageNode": dataMap{ - "countNode": dataMap{ - "sumNode": dataMap{ - "selectNode": dataMap{ - "parallelNode": []dataMap{ - { - "typeIndexJoin": normalTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "averageNode": dataMap{ + "countNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, @@ -284,156 +188,156 @@ func TestDefaultExplainRequestWithAverageOnMultipleJoinedFieldsWithFilter(t *tes }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "averageNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{}, // no attributes - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": 
"books", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "books", + "filter": dataMap{ + "pages": dataMap{ + "_ne": nil, + }, + }, + }, + { + "fieldName": "articles", + "filter": dataMap{ + "pages": dataMap{ + "_gt": int32(3), + "_ne": nil, + }, + }, }, }, }, - { - "fieldName": "articles", - "filter": dataMap{ - "pages": dataMap{ - "_gt": int32(3), - "_ne": nil, + }, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "pages", + "fieldName": "books", + "filter": dataMap{ + "pages": dataMap{ + "_ne": nil, + }, + }, + }, + { + "childFieldName": "pages", + "fieldName": "articles", + "filter": dataMap{ + "pages": dataMap{ + "_gt": int32(3), + "_ne": nil, + }, + }, }, }, }, }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "childFieldName": "pages", - "fieldName": "books", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", + }, + }, + { + TargetNodeName: "scanNode", // inside of 1st root type + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", }, }, }, - { - "childFieldName": "pages", - "fieldName": "articles", + }, + { + TargetNodeName: "scanNode", // inside of 1st subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", "filter": dataMap{ "pages": dataMap{ - "_gt": int32(3), "_ne": nil, }, }, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st root type - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": dataMap{ - "pages": dataMap{ - "_ne": nil, - }, - }, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) - OccurancesToSkip: 2, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) - OccurancesToSkip: 3, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "1", - "collectionName": "Article", - "filter": dataMap{ - "pages": dataMap{ - "_gt": int32(3), - "_ne": nil, + { + TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) + OccurancesToSkip: 2, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - "spans": []dataMap{ - { - "start": "/1", - "end": "/2", + { + TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) + OccurancesToSkip: 3, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "1", + "collectionName": "Article", + "filter": dataMap{ + "pages": dataMap{ + "_gt": int32(3), + "_ne": nil, + }, + }, + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, @@ -441,5 +345,5 @@ func TestDefaultExplainRequestWithAverageOnMultipleJoinedFieldsWithFilter(t *tes }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_average_test.go b/tests/integration/explain/default/with_average_test.go index 63f1739bde..41198fd88a 100644 --- a/tests/integration/explain/default/with_average_test.go +++ b/tests/integration/explain/default/with_average_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -33,75 +34,68 @@ var averagePattern = dataMap{ } func TestDefaultExplainRequestWithAverageOnArrayField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with average on array field.", - Request: `query @explain { - Book { - name - _avg(chapterPages: {}) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // books - 1: { - `{ - "name": "Painted House", - "chapterPages": [1, 22, 33, 44, 55, 66] - }`, - `{ - "name": "A Time for Mercy", - "chapterPages": [0, 22, 101, 321] + testUtils.ExplainRequest{ + + Request: `query @explain { + Book { + name + _avg(chapterPages: {}) + } }`, - }, - }, - ExpectedPatterns: []dataMap{averagePattern}, + ExpectedPatterns: []dataMap{averagePattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "averageNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{}, // no attributes - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "filter": dataMap{"_ne": nil}, - "fieldName": "chapterPages", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "averageNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{}, // no attributes + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "filter": dataMap{"_ne": nil}, + "fieldName": "chapterPages", + }, + }, }, }, - }, - }, - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "filter": dataMap{"_ne": nil}, - "fieldName": "chapterPages", - "childFieldName": nil, + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "filter": dataMap{"_ne": nil}, + "fieldName": "chapterPages", + "childFieldName": nil, + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": nil, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": nil, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -109,5 +103,5 @@ func TestDefaultExplainRequestWithAverageOnArrayField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_count_join_test.go b/tests/integration/explain/default/with_count_join_test.go index e0bc02287f..6c116529a7 100644 --- a/tests/integration/explain/default/with_count_join_test.go +++ b/tests/integration/explain/default/with_count_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,117 +30,76 @@ var countTypeIndexJoinPattern = dataMap{ } func TestDefaultExplainRequestWithCountOnOneToManyJoinedField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with count on a one-to-many joined field.", - Request: `query @explain { - Author { - name - numberOfBooks: _count(books: {}) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + testUtils.ExplainRequest{ + + Request: `query @explain { + Author { + name + numberOfBooks: _count(books: {}) + } }`, - }, - }, - ExpectedPatterns: []dataMap{countTypeIndexJoinPattern}, + ExpectedPatterns: []dataMap{countTypeIndexJoinPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "filter": nil, - "fieldName": "books", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "filter": nil, + "fieldName": "books", + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of root - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "filter": nil, - "collectionID": "2", - "collectionName": "Book", - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", // inside of root + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + TargetNodeName: "scanNode", // inside of subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "2", + "collectionName": "Book", + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -147,201 +107,150 @@ func TestDefaultExplainRequestWithCountOnOneToManyJoinedField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithCountOnOneToManyJoinedFieldWithManySources(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with count on a one-to-many joined field with many sources.", - Request: `query @explain { - Author { - name - numberOfBooks: _count( - books: {} - articles: {} - ) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 2 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 11 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 31 - }`, - }, + testUtils.ExplainRequest{ - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 22 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 101 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321 + Request: `query @explain { + Author { + name + numberOfBooks: _count( + books: {} + articles: {} + ) + } }`, - }, - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "countNode": dataMap{ - "selectNode": dataMap{ - "parallelNode": []dataMap{ - { - "typeIndexJoin": normalTypeJoinPattern, - }, - { 
- "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "filter": nil, - "fieldName": "books", - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "filter": nil, + "fieldName": "books", + }, - { - "filter": nil, - "fieldName": "articles", + { + "filter": nil, + "fieldName": "articles", + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st root type - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": nil, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", // inside of 1st root type + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) - OccurancesToSkip: 2, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "scanNode", // inside of 1st subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": nil, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) - OccurancesToSkip: 3, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "1", - "collectionName": "Article", - "filter": nil, - "spans": []dataMap{ - { - "start": "/1", - "end": "/2", + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", + }, + }, + { + TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) + OccurancesToSkip: 2, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) + OccurancesToSkip: 3, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "1", + "collectionName": "Article", + "filter": nil, + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, @@ -349,5 +258,5 @@ func TestDefaultExplainRequestWithCountOnOneToManyJoinedFieldWithManySources(t * }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_count_test.go b/tests/integration/explain/default/with_count_test.go index 9bc6568f3c..212a4464c9 100644 --- a/tests/integration/explain/default/with_count_test.go +++ b/tests/integration/explain/default/with_count_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,91 +30,50 @@ var countPattern = dataMap{ } func TestDefaultExplainRequestWithCountOnInlineArrayField(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with count on an inline array field.", - Request: `query @explain { - Book { - name - _count(chapterPages: {}) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - //articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - //authors - 2: { - // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + testUtils.ExplainRequest{ + + Request: `query @explain { + Book { + name + _count(chapterPages: {}) + } }`, - }, - }, - ExpectedPatterns: []dataMap{countPattern}, + ExpectedPatterns: []dataMap{countPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "filter": nil, - 
"fieldName": "chapterPages", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "filter": nil, + "fieldName": "chapterPages", + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "filter": nil, - "collectionID": "2", - "collectionName": "Book", - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "filter": nil, + "collectionID": "2", + "collectionName": "Book", + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -121,5 +81,5 @@ func TestDefaultExplainRequestWithCountOnInlineArrayField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_filter_key_test.go b/tests/integration/explain/default/with_filter_key_test.go index 34a1160015..7f181a07f5 100644 --- a/tests/integration/explain/default/with_filter_key_test.go +++ b/tests/integration/explain/default/with_filter_key_test.go @@ -13,52 +13,52 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainRequestWithDocKeyFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with dockey filter.", - Request: `query @explain { - Author(dockey: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(dockey: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d") { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + "filter": nil, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + }, }, }, }, @@ -66,46 +66,51 @@ func TestDefaultExplainRequestWithDocKeyFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithDocKeysFilterUsingOneKey(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with dockeys filter using one key.", - Request: `query @explain { - Author(dockeys: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"]) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(dockeys: ["bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"]) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + "filter": nil, + }, + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + }, }, }, }, @@ -113,61 +118,61 @@ func TestDefaultExplainRequestWithDocKeysFilterUsingOneKey(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithDocKeysFilterUsingMultipleButDuplicateKeys(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with dockeys filter using multiple but duplicate keys.", - Request: `query @explain { - Author( - dockeys: [ - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - ] - ) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + ] + ) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + "filter": nil, }, - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + }, }, }, }, @@ -175,61 +180,61 @@ func TestDefaultExplainRequestWithDocKeysFilterUsingMultipleButDuplicateKeys(t * }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithDocKeysFilterUsingMultipleUniqueKeys(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with dockeys filter using multiple unique keys.", - Request: `query @explain { - Author( - dockeys: [ - "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" - ] - ) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + dockeys: [ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f" + ] + ) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", - "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": []string{ + "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + }, + "filter": nil, }, - { - "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", - "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + }, + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + "end": "/3/bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9e", + }, + { + "start": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f", + "end": "/3/bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67g", + }, + }, }, }, }, @@ -237,56 +242,59 @@ func TestDefaultExplainRequestWithDocKeysFilterUsingMultipleUniqueKeys(t *testin }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithMatchingKeyFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with a filter to match key.", - Request: `query @explain { - Author(filter: {_key: {_eq: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d"}}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + filter: { + _key: { + _eq: "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" + } + } + ) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "_key": dataMap{ - "_eq": "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "selectNode", + ExpectedAttributes: dataMap{ + "_keys": nil, + "filter": nil, }, }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "_key": dataMap{ + "_eq": "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d", + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -294,5 +302,5 @@ func TestDefaultExplainRequestWithMatchingKeyFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_filter_test.go b/tests/integration/explain/default/with_filter_test.go index 0b32d15b5a..a165f28876 100644 --- a/tests/integration/explain/default/with_filter_test.go +++ b/tests/integration/explain/default/with_filter_test.go @@ -13,56 +13,47 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainRequestWithStringEqualFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with string equal (_eq) filter.", - Request: `query @explain { - Author(filter: {name: {_eq: "Lone"}}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {name: {_eq: "Lone"}}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "name": dataMap{ - "_eq": "Lone", - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "name": dataMap{ + "_eq": "Lone", + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -70,56 +61,46 @@ func TestDefaultExplainRequestWithStringEqualFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithIntegerEqualFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with integer equal (_eq) filter.", - Request: `query @explain { - Author(filter: {age: {_eq: 26}}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {age: {_eq: 26}}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_eq": int32(26), - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_eq": int32(26), + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -127,56 +108,46 @@ func TestDefaultExplainRequestWithIntegerEqualFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithGreaterThanFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with greater than (_gt) filter.", - Request: `query @explain { - Author(filter: {age: {_gt: 20}}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - // bae-bfbfc89c-0d63-5ea4-81a3-3ebd295be67f - `{ - "name": "Lone", - "age": 26, - "verified": false - }`, - // "bae-079d0bd8-4b1b-5f5f-bd95-4d915c277f9d" - `{ - "name": "Shahzad Lone", - "age": 27, - "verified": true - }`, - }, - }, + Actions: []any{ + explainUtils.SchemaForExplainTests, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_gt": int32(20), - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {age: {_gt: 20}}) { + name + age + } + }`, + + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_gt": int32(20), + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -184,204 +155,162 @@ func TestDefaultExplainRequestWithGreaterThanFilter(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithLogicalCompoundAndFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with logical compound (_and) filter.", - Request: `query @explain { - Author(filter: {_and: [{age: {_gt: 20}}, {age: {_lt: 50}}]}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 - }`, - `{ - "name": "Bob", - "age": 32 - }`, - `{ - "name": "Carlo", - "age": 55 - }`, - `{ - "name": "Alice", - "age": 19 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {_and: [{age: {_gt: 20}}, {age: {_lt: 50}}]}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "_and": []any{ - dataMap{ - "age": dataMap{ - "_gt": int32(20), + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "_and": []any{ + dataMap{ + "age": dataMap{ + "_gt": int32(20), + }, + }, + dataMap{ + "age": dataMap{ + "_lt": int32(50), + }, + }, }, }, - dataMap{ - "age": dataMap{ - "_lt": int32(50), + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", }, }, }, }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithLogicalCompoundOrFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with logical compound (_or) filter.", - Request: `query @explain { - Author(filter: {_or: [{age: {_eq: 55}}, {age: {_eq: 19}}]}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 - }`, - `{ - "name": "Bob", - "age": 32 - }`, - `{ - "name": "Carlo", - "age": 55 - }`, - `{ - "name": "Alice", - "age": 19 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {_or: [{age: {_eq: 55}}, {age: {_eq: 19}}]}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "_or": []any{ - dataMap{ - "age": dataMap{ - "_eq": int32(55), + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "_or": []any{ + dataMap{ + "age": dataMap{ + "_eq": int32(55), + }, + }, + dataMap{ + "age": dataMap{ + "_eq": int32(19), + }, + }, }, }, - dataMap{ - "age": dataMap{ - "_eq": int32(19), + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", }, }, }, }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", - }, - }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithMatchInsideList(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request filtering values that match within (_in) a list.", - Request: `query @explain { - Author(filter: {age: {_in: [19, 40, 55]}}) { - name - age - } - }`, - - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 - }`, - `{ - "name": "Bob", - "age": 32 - }`, - `{ - "name": "Carlo", - "age": 55 - }`, - `{ - "name": "Alice", - "age": 19 + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(filter: {age: {_in: [19, 40, 55]}}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{basicPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be last node, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": dataMap{ - "age": dataMap{ - "_in": []any{ - int32(19), - int32(40), - int32(55), + ExpectedPatterns: []dataMap{basicPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be last node, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": dataMap{ + "age": dataMap{ + "_in": []any{ + int32(19), + int32(40), + int32(55), + }, + }, + }, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, }, - }, - }, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", }, }, }, @@ -389,5 +318,5 @@ func TestDefaultExplainRequestWithMatchInsideList(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_limit_count_test.go b/tests/integration/explain/default/with_limit_count_test.go index 2389900d30..5ac40a24c5 100644 --- a/tests/integration/explain/default/with_limit_count_test.go +++ b/tests/integration/explain/default/with_limit_count_test.go @@ -13,280 +13,113 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestDefaultExplainRequestWithOnlyLimitOnRelatedChildWithCount(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with limit on related child with count.", - Request: `query @explain { - Author { - numberOfArts: _count(articles: {}) - articles(limit: 2) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 101", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, - - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "countNode": dataMap{ - "selectNode": dataMap{ 
- "parallelNode": []dataMap{ - { - "typeIndexJoin": limitTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author { + numberOfArts: _count(articles: {}) + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": limitTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "articles", - "filter": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "articles", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(2), + "offset": uint64(0), }, }, - }, - }, - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(2), - "offset": uint64(0), }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithLimitArgsOnParentAndRelatedChildWithCount(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with limit args on parent and related child with count.", - Request: `query @explain { - Author(limit: 3, offset: 1) { - numberOfArts: _count(articles: {}) - articles(limit: 2) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 101", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, - - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: 
[]dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "limitNode": dataMap{ - "countNode": dataMap{ - "selectNode": dataMap{ - "parallelNode": []dataMap{ - { - "typeIndexJoin": limitTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(limit: 3, offset: 1) { + numberOfArts: _count(articles: {}) + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "countNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": limitTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, @@ -294,42 +127,42 @@ func TestDefaultExplainRequestWithLimitArgsOnParentAndRelatedChildWithCount(t *t }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(3), - "offset": uint64(1), - }, - }, - { - TargetNodeName: "countNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "articles", - "filter": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(3), + "offset": uint64(1), + }, + }, + { + TargetNodeName: "countNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "articles", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "limitNode", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(2), + "offset": uint64(0), }, }, - }, - }, - { - TargetNodeName: "limitNode", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(2), - "offset": uint64(0), }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_limit_join_test.go b/tests/integration/explain/default/with_limit_join_test.go index 55d4681cfb..c56af3d646 100644 --- a/tests/integration/explain/default/with_limit_join_test.go +++ b/tests/integration/explain/default/with_limit_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -32,541 +33,205 @@ var limitTypeJoinPattern = dataMap{ } func TestDefaultExplainRequestWithOnlyLimitOnRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with only limit on related child.", - Request: `query @explain { - Author { - name - articles(limit: 1) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 101", - "author_id": 
"bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, - - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.ExplainRequest{ - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true + Request: `query @explain { + Author { + name + articles(limit: 1) { + name + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": limitTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": limitTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(1), - "offset": uint64(0), + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(1), + "offset": uint64(0), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithOnlyOffsetOnRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with only offset on related child.", - Request: `query @explain { - Author { - name - articles(offset: 2) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 101", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - 
}, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, + testUtils.ExplainRequest{ - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Request: `query @explain { + Author { + name + articles(offset: 2) { + name + } + } }`, - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": limitTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": limitTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": nil, - "offset": uint64(2), + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": nil, + "offset": uint64(2), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithBothLimitAndOffsetOnRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with both limit and offset on related child.", - Request: `query @explain { - Author { - name - articles(limit: 2, offset: 2) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, + testUtils.ExplainRequest{ - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" + Request: `query @explain { + Author { + name + articles(limit: 2, offset: 2) { + name + } + } }`, - `{ - "name": "C++ 101", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, - - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - - // _key: 
bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": limitTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": limitTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(2), - "offset": uint64(2), + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(2), + "offset": uint64(2), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithLimitOnRelatedChildAndBothLimitAndOffsetOnParent(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with limit on related child & both limit + offset on parent.", - Request: `query @explain { - Author(limit: 3, offset: 1) { - name - articles(limit: 2) { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - - `{ - "name": "C++ 100", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 101", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 200", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "C++ 202", - "author_id": "bae-aa839756-588e-5b57-887d-33689a06e375" - }`, - - `{ - "name": "Rust 100", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 101", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 200", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - - `{ - "name": "Rust 202", - "author_id": "bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, - - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "limitNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": limitTypeJoinPattern, + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(limit: 3, offset: 1) { + name + articles(limit: 2) { + name + } + } + }`, + + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "limitNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": limitTypeJoinPattern, + }, + }, }, }, }, }, - 
}, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(3), - "offset": uint64(1), - }, - }, - { - TargetNodeName: "limitNode", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(2), - "offset": uint64(0), + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(3), + "offset": uint64(1), + }, + }, + { + TargetNodeName: "limitNode", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(2), + "offset": uint64(0), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_limit_test.go b/tests/integration/explain/default/with_limit_test.go index b25ca7eed7..0d2ebbae23 100644 --- a/tests/integration/explain/default/with_limit_test.go +++ b/tests/integration/explain/default/with_limit_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,184 +30,106 @@ var limitPattern = dataMap{ } func TestDefaultExplainRequestWithOnlyLimit(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with only limit.", - Request: `query @explain { - Author(limit: 2) { - name - } - }`, - - Docs: map[int][]string{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.ExplainRequest{ - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true + Request: `query @explain { + Author(limit: 2) { + name + } }`, - }, - }, - ExpectedPatterns: []dataMap{limitPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(2), - "offset": uint64(0), + ExpectedPatterns: []dataMap{limitPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(2), + "offset": uint64(0), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithOnlyOffset(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with only offset.", - Request: `query @explain { - Author(offset: 2) { - name - } - }`, - - Docs: map[int][]string{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, + 
testUtils.ExplainRequest{ - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Request: `query @explain { + Author(offset: 2) { + name + } }`, - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true - }`, - }, - }, - - ExpectedPatterns: []dataMap{limitPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": nil, - "offset": uint64(2), + ExpectedPatterns: []dataMap{limitPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": nil, + "offset": uint64(2), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithLimitAndOffset(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with limit and offset.", - Request: `query @explain { - Author(limit: 3, offset: 1) { - name - } - }`, - - Docs: map[int][]string{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - - // _key: bae-aa839756-588e-5b57-887d-33689a06e375 - `{ - "name": "Shahzad Sisley", - "age": 26, - "verified": true - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, + testUtils.ExplainRequest{ - // _key: bae-e7e87bbb-1079-59db-b4b9-0e14b24d5b69 - `{ - "name": "Andrew Lone", - "age": 28, - "verified": true + Request: `query @explain { + Author(limit: 3, offset: 1) { + name + } }`, - }, - }, - - ExpectedPatterns: []dataMap{limitPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "limitNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "limit": uint64(3), - "offset": uint64(1), + ExpectedPatterns: []dataMap{limitPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "limitNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "limit": uint64(3), + "offset": uint64(1), + }, + }, }, }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_order_join_test.go b/tests/integration/explain/default/with_order_join_test.go index 16b77c64aa..ddffdeb776 100644 --- a/tests/integration/explain/default/with_order_join_test.go +++ b/tests/integration/explain/default/with_order_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -32,75 +33,48 @@ var orderTypeJoinPattern = dataMap{ } func TestDefaultExplainRequestWithOrderFieldOnRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with order field on a related child.", - Request: `query @explain { - Author { - name - articles(order: {name: DESC}) { - name - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": 
"bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Request: `query @explain { + Author { + name + articles(order: {name: DESC}) { + name + } + } }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": orderTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": orderTypeJoinPattern, + }, + }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "orderNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "DESC", - "fields": []string{ - "name", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "DESC", + "fields": []string{ + "name", + }, + }, }, }, }, @@ -109,97 +83,70 @@ func TestDefaultExplainRequestWithOrderFieldOnRelatedChild(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithOrderFieldOnParentAndRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with order field on parent and related child.", - Request: `query @explain { - Author(order: {name: ASC}) { - name - articles(order: {name: DESC}) { - name - } - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true + Request: `query @explain { + Author(order: {name: ASC}) { + name + articles(order: {name: DESC}) { + name + } + } }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false - }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "orderNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": orderTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "orderNode": dataMap{ + "selectNode": dataMap{ + "typeIndexJoin": orderTypeJoinPattern, + }, + }, }, }, }, }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "orderNode", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "ASC", - "fields": []string{ - "name", + ExpectedTargets: 
[]testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "orderNode", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "ASC", + "fields": []string{ + "name", + }, + }, }, }, }, - }, - }, - { - TargetNodeName: "orderNode", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "DESC", - "fields": []string{ - "name", + { + TargetNodeName: "orderNode", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "DESC", + "fields": []string{ + "name", + }, + }, }, }, }, @@ -208,92 +155,35 @@ func TestDefaultExplainRequestWithOrderFieldOnParentAndRelatedChild(t *testing.T }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWhereParentIsOrderedByItsRelatedChild(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request where parent is ordered by it's related child.", - Request: `query @explain { - Author( - order: { - articles: {name: ASC} - } - ) { - articles { - name - } - } - }`, - - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04" - }`, - }, - - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author( + order: { + articles: {name: ASC} + } + ) { + articles { + name + } + } }`, - }, - }, - - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "orderNode": dataMap{ - "selectNode": dataMap{ - "typeIndexJoin": normalTypeJoinPattern, - }, - }, - }, - }, - }, - }, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "orderNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "ASC", - "fields": []string{ - "articles", - "name", - }, - }, - }, - }, + ExpectedError: "Argument \"order\" has invalid value {articles: {name: ASC}}.\nIn field \"articles\": Unknown field.", }, }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_order_test.go b/tests/integration/explain/default/with_order_test.go index b953d1b86c..fda0eda753 100644 --- a/tests/integration/explain/default/with_order_test.go +++ b/tests/integration/explain/default/with_order_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,47 +30,36 @@ var orderPattern = dataMap{ } func TestDefaultExplainRequestWithAscendingOrderOnParent(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with ascending order on parent.", - Request: `query @explain { - 
Author(order: {age: ASC}) { - name - age - } - }`, - - Docs: map[int][]string{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(order: {age: ASC}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{orderPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "orderNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "ASC", - "fields": []string{ - "age", + ExpectedPatterns: []dataMap{orderPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "ASC", + "fields": []string{ + "age", + }, + }, }, }, }, @@ -78,57 +68,46 @@ func TestDefaultExplainRequestWithAscendingOrderOnParent(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with multiple order fields on parent.", - Request: `query @explain { - Author(order: {name: ASC, age: DESC}) { - name - age - } - }`, - - Docs: map[int][]string{ - // authors - 2: { - // _key: bae-41598f0c-19bc-5da6-813b-e80f14a10df3 - `{ - "name": "John Grisham", - "age": 65, - "verified": true - }`, - // _key: bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false + Actions: []any{ + explainUtils.SchemaForExplainTests, + + testUtils.ExplainRequest{ + + Request: `query @explain { + Author(order: {name: ASC, age: DESC}) { + name + age + } }`, - }, - }, - ExpectedPatterns: []dataMap{orderPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "orderNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "orderings": []dataMap{ - { - "direction": "ASC", - "fields": []string{ - "name", - }, - }, - { - "direction": "DESC", - "fields": []string{ - "age", + ExpectedPatterns: []dataMap{orderPattern}, + + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "orderNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "orderings": []dataMap{ + { + "direction": "ASC", + "fields": []string{ + "name", + }, + }, + { + "direction": "DESC", + "fields": []string{ + "age", + }, + }, }, }, }, @@ -137,5 +116,5 @@ func TestDefaultExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_sum_join_test.go b/tests/integration/explain/default/with_sum_join_test.go index ebd2bbe2d7..74d330fefd 100644 --- a/tests/integration/explain/default/with_sum_join_test.go +++ b/tests/integration/explain/default/with_sum_join_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,112 +30,80 @@ var sumTypeIndexJoinPattern = dataMap{ } func TestDefaultExplainRequestWithSumOnOneToManyJoinedField(t 
*testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with sum on a one-to-many joined field.", - Request: `query @explain { - Author { - name - _key - TotalPages: _sum( - books: {field: pages} - ) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 22 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 101 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321 - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" + Request: `query @explain { + Author { + name + _key + TotalPages: _sum( + books: {field: pages} + ) + } }`, - }, - }, - ExpectedPatterns: []dataMap{sumTypeIndexJoinPattern}, + ExpectedPatterns: []dataMap{sumTypeIndexJoinPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "books", - "childFieldName": "pages", - "filter": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "books", + "childFieldName": "pages", + "filter": nil, + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of root - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": nil, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", // inside of root + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, + }, + }, + { + TargetNodeName: "scanNode", // inside of subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": nil, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -142,130 +111,98 @@ func TestDefaultExplainRequestWithSumOnOneToManyJoinedField(t *testing.T) { }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithSumOnOneToManyJoinedFieldWithFilter(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with sum on a one-to-many joined field, with filter.", - Request: `query @explain { - Author { - name - TotalPages: _sum( - articles: { - field: pages, - filter: { - name: { - _eq: "To my dear readers" - } - } - } - ) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 2 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 11 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 31 - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" + Request: `query @explain { + Author { + name + TotalPages: _sum( + articles: { + field: pages, + filter: { + name: { + _eq: "To my dear readers" + } + } + } + ) + } }`, - }, - }, - ExpectedPatterns: []dataMap{sumTypeIndexJoinPattern}, + ExpectedPatterns: []dataMap{sumTypeIndexJoinPattern}, - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "articles", - "childFieldName": "pages", - "filter": dataMap{ - "name": dataMap{ - "_eq": "To my dear readers", + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "articles", + "childFieldName": "pages", + "filter": dataMap{ + "name": dataMap{ + "_eq": "To my dear readers", + }, + }, }, }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - TargetNodeName: "scanNode", // inside of root - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "typeIndexJoin", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "1", - "collectionName": "Article", - "filter": dataMap{ - "name": dataMap{ - "_eq": "To my dear readers", + { + TargetNodeName: "scanNode", // inside of root + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - "spans": []dataMap{ - { - "start": "/1", - "end": "/2", + { + TargetNodeName: "scanNode", // inside of subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "1", + "collectionName": "Article", + "filter": dataMap{ + "name": dataMap{ + "_eq": "To my dear readers", + }, + }, + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, @@ -273,203 +210,152 @@ func TestDefaultExplainRequestWithSumOnOneToManyJoinedFieldWithFilter(t *testing }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestDefaultExplainRequestWithSumOnOneToManyJoinedFieldWithManySources(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with sum on a one-to-many joined field with many sources.", - Request: `query @explain { - Author { - name - TotalPages: _sum( - books: {field: pages}, - articles: {field: pages} - ) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // articles - 0: { - `{ - "name": "After Guantánamo, Another Injustice", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 2 - }`, - `{ - "name": "To my dear readers", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 11 - }`, - `{ - "name": "Twinklestar's Favourite Xmas Cookie", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 31 - }`, - }, - - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 22 - }`, - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 101 - }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321 - }`, - }, + testUtils.ExplainRequest{ - // authors - 2: { - // _key: "bae-25fafcc7-f251-58c1-9495-ead73e676fb8" - `{ - "name": "John Grisham", - "age": 65, - "verified": true, - "contact_id": "bae-1fe427b8-ab8d-56c3-9df2-826a6ce86fed" - }`, - // _key: "bae-3dddb519-3612-5e43-86e5-49d6295d4f84" - `{ - "name": "Cornelia Funke", - "age": 62, - "verified": false, - "contact_id": "bae-c0960a29-b704-5c37-9c2e-59e1249e4559" + Request: `query @explain { + Author { + name + TotalPages: _sum( + books: {field: pages}, + articles: {field: pages} + ) + } }`, - }, - }, - ExpectedPatterns: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "sumNode": dataMap{ - "selectNode": dataMap{ - "parallelNode": []dataMap{ - { - "typeIndexJoin": normalTypeJoinPattern, - }, - { - "typeIndexJoin": normalTypeJoinPattern, + ExpectedPatterns: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "sumNode": dataMap{ + "selectNode": dataMap{ + "parallelNode": []dataMap{ + { + "typeIndexJoin": normalTypeJoinPattern, + }, + { + "typeIndexJoin": normalTypeJoinPattern, + }, + }, }, }, }, }, }, }, - }, - }, - ExpectedTargets: 
[]explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "childFieldName": "pages", - "fieldName": "books", - "filter": nil, - }, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "childFieldName": "pages", + "fieldName": "books", + "filter": nil, + }, - { - "childFieldName": "pages", - "fieldName": "articles", + { + "childFieldName": "pages", + "fieldName": "articles", + "filter": nil, + }, + }, + }, + }, + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 0, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "books", + }, + }, + { + TargetNodeName: "scanNode", // inside of 1st root type + OccurancesToSkip: 0, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 0, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "books", - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st root type - OccurancesToSkip: 0, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "scanNode", // inside of 1st subType (related type) + OccurancesToSkip: 1, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": nil, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 1st subType (related type) - OccurancesToSkip: 1, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": nil, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "typeIndexJoin", + OccurancesToSkip: 1, + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "joinType": "typeJoinMany", + "rootName": "author", + "subTypeName": "articles", }, }, - }, - }, - { - TargetNodeName: "typeIndexJoin", - OccurancesToSkip: 1, - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "joinType": "typeJoinMany", - "rootName": "author", - "subTypeName": "articles", - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) - OccurancesToSkip: 2, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "3", - "collectionName": "Author", - "filter": nil, - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + { + TargetNodeName: "scanNode", // inside of 2nd root type (AKA: subType's root) + OccurancesToSkip: 2, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
+ ExpectedAttributes: dataMap{ + "collectionID": "3", + "collectionName": "Author", + "filter": nil, + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) - OccurancesToSkip: 3, - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. - ExpectedAttributes: dataMap{ - "collectionID": "1", - "collectionName": "Article", - "filter": nil, - "spans": []dataMap{ - { - "start": "/1", - "end": "/2", + { + TargetNodeName: "scanNode", // inside of 2nd subType (AKA: subType's subtype) + OccurancesToSkip: 3, + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "1", + "collectionName": "Article", + "filter": nil, + "spans": []dataMap{ + { + "start": "/1", + "end": "/2", + }, + }, }, }, }, @@ -477,5 +363,5 @@ func TestDefaultExplainRequestWithSumOnOneToManyJoinedFieldWithManySources(t *te }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/default/with_sum_test.go b/tests/integration/explain/default/with_sum_test.go index 05643560a2..f7fbc8e715 100644 --- a/tests/integration/explain/default/with_sum_test.go +++ b/tests/integration/explain/default/with_sum_test.go @@ -13,6 +13,7 @@ package test_explain_default import ( "testing" + testUtils "github.com/sourcenetwork/defradb/tests/integration" explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) @@ -29,70 +30,51 @@ var sumPattern = dataMap{ } func TestDefaultExplainRequestWithSumOnInlineArrayField_ChildFieldWillBeEmpty(t *testing.T) { - test := explainUtils.ExplainRequestTestCase{ + test := testUtils.TestCase{ Description: "Explain (default) request with sum on an inline array field.", - Request: `query @explain { - Book { - name - NotSureWhySomeoneWouldSumTheChapterPagesButHereItIs: _sum(chapterPages: {}) - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - // books - 1: { - `{ - "name": "Painted House", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 77, - "chapterPages": [1, 22, 33, 44, 55, 66] - }`, // sum of chapterPages == 221 + testUtils.ExplainRequest{ - `{ - "name": "A Time for Mercy", - "author_id": "bae-25fafcc7-f251-58c1-9495-ead73e676fb8", - "pages": 55, - "chapterPages": [1, 22] - }`, // sum of chapterPages == 23 + Request: `query @explain { + Book { + name + NotSureWhySomeoneWouldSumTheChapterPagesButHereItIs: _sum(chapterPages: {}) + } + }`, - `{ - "name": "Theif Lord", - "author_id": "bae-3dddb519-3612-5e43-86e5-49d6295d4f84", - "pages": 321, - "chapterPages": [10, 50, 100, 200, 300] - }`, // sum of chapterPages == 660 - }, - }, + ExpectedPatterns: []dataMap{sumPattern}, - ExpectedPatterns: []dataMap{sumPattern}, - - ExpectedTargets: []explainUtils.PlanNodeTargetCase{ - { - TargetNodeName: "sumNode", - IncludeChildNodes: false, - ExpectedAttributes: dataMap{ - "sources": []dataMap{ - { - "fieldName": "chapterPages", - "childFieldName": nil, - "filter": nil, + ExpectedTargets: []testUtils.PlanNodeTargetCase{ + { + TargetNodeName: "sumNode", + IncludeChildNodes: false, + ExpectedAttributes: dataMap{ + "sources": []dataMap{ + { + "fieldName": "chapterPages", + "childFieldName": nil, + "filter": nil, + }, + }, }, }, - }, - }, - { - TargetNodeName: "scanNode", - IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. 
- ExpectedAttributes: dataMap{ - "collectionID": "2", - "collectionName": "Book", - "filter": nil, - "spans": []dataMap{ - { - "start": "/2", - "end": "/3", + { + TargetNodeName: "scanNode", + IncludeChildNodes: true, // should be leaf of it's branch, so will have no child nodes. + ExpectedAttributes: dataMap{ + "collectionID": "2", + "collectionName": "Book", + "filter": nil, + "spans": []dataMap{ + { + "start": "/2", + "end": "/3", + }, + }, }, }, }, @@ -100,5 +82,5 @@ func TestDefaultExplainRequestWithSumOnInlineArrayField_ChildFieldWillBeEmpty(t }, } - runExplainTest(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/create_test.go b/tests/integration/explain/execute/create_test.go index e40343f7df..e8ab75d48a 100644 --- a/tests/integration/explain/execute/create_test.go +++ b/tests/integration/explain/execute/create_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { @@ -22,16 +23,16 @@ func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { Description: "Explain (execute) mutation request with create.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { create_Author(data: "{\"name\": \"Shahzad Lone\",\"age\": 27,\"verified\": true}") { name } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -44,9 +45,9 @@ func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { "iterations": uint64(1), "filterMatches": uint64(1), "scanNode": dataMap{ - "iterations": uint64(1), - "docFetches": uint64(1), - "filterMatches": uint64(1), + "iterations": uint64(1), + "docFetches": uint64(1), + "fieldFetches": uint64(1), }, }, }, @@ -58,5 +59,5 @@ func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/dagscan_test.go b/tests/integration/explain/execute/dagscan_test.go index 7133286b64..9b91ff5003 100644 --- a/tests/integration/explain/execute/dagscan_test.go +++ b/tests/integration/explain/execute/dagscan_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainCommitsDagScan(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainCommitsDagScan(t *testing.T) { Description: "Explain (execute) commits request - dagScan.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { commits (dockey: "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138") { links { @@ -36,7 +37,7 @@ func TestExecuteExplainCommitsDagScan(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -58,7 +59,7 @@ func TestExecuteExplainCommitsDagScan(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainLatestCommitsDagScan(t *testing.T) { @@ -67,12 +68,12 @@ func TestExecuteExplainLatestCommitsDagScan(t *testing.T) { Description: "Explain (execute) 
latest commits request - dagScan.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Author create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { latestCommits(dockey: "bae-7f54d9e0-cbde-5320-aa6c-5c8895a89138") { cid @@ -82,7 +83,7 @@ func TestExecuteExplainLatestCommitsDagScan(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -104,5 +105,5 @@ func TestExecuteExplainLatestCommitsDagScan(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/delete_test.go b/tests/integration/explain/execute/delete_test.go index eaefde6076..13411b5f5e 100644 --- a/tests/integration/explain/execute/delete_test.go +++ b/tests/integration/explain/execute/delete_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { @@ -22,19 +23,19 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { Description: "Explain (execute) mutation request with deletion using id.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Addresses create2AddressDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { delete_ContactAddress(ids: ["bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"]) { city } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -47,9 +48,9 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { "iterations": uint64(2), "filterMatches": uint64(1), "scanNode": dataMap{ - "iterations": uint64(2), - "docFetches": uint64(2), - "filterMatches": uint64(1), + "iterations": uint64(2), + "docFetches": uint64(1), + "fieldFetches": uint64(1), }, }, }, @@ -61,7 +62,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { @@ -70,19 +71,19 @@ func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { Description: "Explain (execute) mutation request with deletion using filter.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Author create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { delete_Author(filter: {name: {_like: "%Funke%"}}) { name } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -95,9 +96,9 @@ func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { "iterations": uint64(2), "filterMatches": uint64(1), "scanNode": dataMap{ - "iterations": uint64(2), - "docFetches": uint64(3), - "filterMatches": uint64(1), + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -109,5 +110,5 @@ func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/fixture.go b/tests/integration/explain/execute/fixture.go index 4025055b76..ec83514778 100644 --- 
a/tests/integration/explain/execute/fixture.go +++ b/tests/integration/explain/execute/fixture.go @@ -11,62 +11,11 @@ package test_explain_execute import ( - "testing" - testUtils "github.com/sourcenetwork/defradb/tests/integration" ) type dataMap = map[string]any -func gqlSchemaExecuteExplain() testUtils.SchemaUpdate { - return testUtils.SchemaUpdate{ - Schema: (` - type Article { - name: String - author: Author - pages: Int - } - - type Book { - name: String - author: Author - pages: Int - chapterPages: [Int!] - } - - type Author { - name: String - age: Int - verified: Boolean - books: [Book] - articles: [Article] - contact: AuthorContact - } - - type AuthorContact { - cell: String - email: String - author: Author - address: ContactAddress - } - - type ContactAddress { - city: String - country: String - contact: AuthorContact - } - `), - } -} - -func executeTestCase(t *testing.T, test testUtils.TestCase) { - testUtils.ExecuteTestCase( - t, - []string{"Article", "Book", "Author", "AuthorContact", "ContactAddress"}, - test, - ) -} - func create3ArticleDocuments() []testUtils.CreateDoc { return []testUtils.CreateDoc{ { diff --git a/tests/integration/explain/execute/group_test.go b/tests/integration/explain/execute/group_test.go index 183ce890db..3b7e42c845 100644 --- a/tests/integration/explain/execute/group_test.go +++ b/tests/integration/explain/execute/group_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithGroup(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { Description: "Explain (execute) request with groupBy.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create2AddressDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { ContactAddress(groupBy: [country]) { country @@ -37,7 +38,7 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -55,9 +56,9 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { "iterations": uint64(3), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(4), - "docFetches": uint64(4), - "filterMatches": uint64(2), + "iterations": uint64(4), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -69,5 +70,5 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/query_deleted_docs_test.go b/tests/integration/explain/execute/query_deleted_docs_test.go new file mode 100644 index 0000000000..7642873b7f --- /dev/null +++ b/tests/integration/explain/execute/query_deleted_docs_test.go @@ -0,0 +1,70 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package test_explain_execute + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" +) + +func TestExecuteExplainQueryDeletedDocs(t *testing.T) { + test := testUtils.TestCase{ + Description: "Explain (execute) query with deleted documents.", + + Actions: []any{ + explainUtils.SchemaForExplainTests, + create2AddressDocuments(), + testUtils.Request{ + Request: `mutation { + delete_ContactAddress(ids: ["bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"]) { + _key + } + }`, + Results: []map[string]any{ + {"_key": "bae-f01bf83f-1507-5fb5-a6a3-09ecffa3c692"}, + }, + }, + testUtils.ExplainRequest{ + Request: `query @explain(type: execute) { + ContactAddress(showDeleted: true) { + city + country + } + }`, + + ExpectedFullGraph: []dataMap{ + { + "explain": dataMap{ + "executionSuccess": true, + "sizeOfResult": 2, + "planExecutions": uint64(3), + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "iterations": uint64(3), + "filterMatches": uint64(2), + "scanNode": dataMap{ + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), + }, + }, + }, + }, + }, + }, + }, + }, + } + + explainUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/explain/execute/scan_test.go b/tests/integration/explain/execute/scan_test.go index ab2b5348da..85bd64229c 100644 --- a/tests/integration/explain/execute/scan_test.go +++ b/tests/integration/explain/execute/scan_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { @@ -22,7 +23,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { Description: "Explain (execute) request with all documents matching.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -44,7 +45,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -52,7 +53,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -63,9 +64,9 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { "iterations": uint64(3), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -76,7 +77,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { @@ -85,16 +86,16 @@ func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { Description: "Explain (execute) request with no documents.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -105,9 +106,9 @@ func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { "iterations": uint64(1), 
"filterMatches": uint64(0), "scanNode": dataMap{ - "iterations": uint64(1), - "docFetches": uint64(1), - "filterMatches": uint64(0), + "iterations": uint64(1), + "docFetches": uint64(0), + "fieldFetches": uint64(0), }, }, }, @@ -118,7 +119,7 @@ func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { @@ -127,7 +128,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { Description: "Explain (execute) request with some documents matching.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -149,7 +150,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(filter: {name: {_eq: "Shahzad"}}) { name @@ -157,7 +158,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -168,9 +169,9 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { "iterations": uint64(2), "filterMatches": uint64(1), "scanNode": dataMap{ - "iterations": uint64(2), - "docFetches": uint64(3), - "filterMatches": uint64(1), + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -181,7 +182,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { @@ -190,7 +191,7 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { Description: "Explain (execute) request with documents but no matches.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -212,7 +213,7 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(filter: {name: {_eq: "John"}}) { name @@ -220,7 +221,7 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -231,9 +232,9 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { "iterations": uint64(1), "filterMatches": uint64(0), "scanNode": dataMap{ - "iterations": uint64(1), - "docFetches": uint64(3), - "filterMatches": uint64(0), + "iterations": uint64(1), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -244,5 +245,5 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/top_level_test.go b/tests/integration/explain/execute/top_level_test.go index 5053896b5d..6afa9cbfb2 100644 --- a/tests/integration/explain/execute/top_level_test.go +++ b/tests/integration/explain/execute/top_level_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { @@ -22,7 +23,7 @@ func 
TestExecuteExplainTopLevelAverageRequest(t *testing.T) { Description: "Explain (execute) request with top level average.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -44,7 +45,7 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { _avg( Author: { @@ -53,7 +54,7 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { ) }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -66,9 +67,9 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { "iterations": uint64(3), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -100,7 +101,7 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainTopLevelCountRequest(t *testing.T) { @@ -109,7 +110,7 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { Description: "Explain (execute) request with top level count.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -131,12 +132,12 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { _count(Author: {}) }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -149,9 +150,9 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { "iterations": uint64(3), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -170,7 +171,7 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainTopLevelSumRequest(t *testing.T) { @@ -179,7 +180,7 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { Description: "Explain (execute) request with top level sum.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, testUtils.CreateDoc{ CollectionID: 2, @@ -201,7 +202,7 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { _sum( Author: { @@ -210,7 +211,7 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { ) }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -223,9 +224,9 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { "iterations": uint64(3), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -244,5 +245,5 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/type_join_test.go b/tests/integration/explain/execute/type_join_test.go index 2c3a2448c1..8e26f423bb 
100644 --- a/tests/integration/explain/execute/type_join_test.go +++ b/tests/integration/explain/execute/type_join_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { @@ -22,7 +23,7 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { Description: "Explain a one-to-one join relation query, with alias.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors create2AuthorDocuments(), @@ -30,7 +31,7 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { // Contacts create2AuthorContactDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { OnlyEmail: contact { @@ -39,7 +40,7 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -52,9 +53,9 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -66,7 +67,7 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { @@ -75,7 +76,7 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { Description: "Explain (execute) with two one-to-one join relation.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors create2AuthorDocuments(), @@ -83,7 +84,7 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { // Contacts create2AuthorContactDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { OnlyEmail: contact { @@ -96,7 +97,7 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -111,9 +112,9 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -121,9 +122,9 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -137,7 +138,7 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { @@ -146,7 +147,7 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { Description: "Explain (execute) with two nested level deep one to one join.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors create2AuthorDocuments(), @@ -157,7 +158,7 @@ func 
TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { // Addresses create2AddressDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -170,7 +171,7 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -183,9 +184,9 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -197,5 +198,5 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/update_test.go b/tests/integration/explain/execute/update_test.go index 5ff90d26c4..d9469e4b4e 100644 --- a/tests/integration/explain/execute/update_test.go +++ b/tests/integration/explain/execute/update_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { Description: "Explain (execute) mutation request with update using ids.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Addresses create2AddressDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { update_ContactAddress( ids: [ @@ -41,7 +42,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -55,9 +56,9 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { "iterations": uint64(6), "filterMatches": uint64(4), "scanNode": dataMap{ - "iterations": uint64(6), - "docFetches": uint64(6), - "filterMatches": uint64(4), + "iterations": uint64(6), + "docFetches": uint64(4), + "fieldFetches": uint64(8), }, }, }, @@ -69,7 +70,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { @@ -78,12 +79,12 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { Description: "Explain (execute) mutation request with update using filter.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Addresses create2AddressDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `mutation @explain(type: execute) { update_ContactAddress( filter: { @@ -98,7 +99,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -112,9 +113,9 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { "iterations": uint64(4), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(4), - "docFetches": uint64(6), - "filterMatches": uint64(2), + "iterations": uint64(4), + "docFetches": uint64(4), + "fieldFetches": 
uint64(6), }, }, }, @@ -126,5 +127,5 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/with_average_test.go b/tests/integration/explain/execute/with_average_test.go index 6cd5ef79b2..a3070e8c42 100644 --- a/tests/integration/explain/execute/with_average_test.go +++ b/tests/integration/explain/execute/with_average_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { Description: "Explain (execute) request using average on array field.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create3BookDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Book { name @@ -35,7 +36,7 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -52,9 +53,9 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { "iterations": uint64(4), "filterMatches": uint64(3), "scanNode": dataMap{ - "iterations": uint64(4), - "docFetches": uint64(4), - "filterMatches": uint64(3), + "iterations": uint64(4), + "docFetches": uint64(3), + "fieldFetches": uint64(5), }, }, }, @@ -68,7 +69,7 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { @@ -77,7 +78,7 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { Description: "Explain (execute) request using average on joined field.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create3BookDocuments(), @@ -85,7 +86,7 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -93,7 +94,7 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -112,9 +113,9 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -129,5 +130,5 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/with_count_test.go b/tests/integration/explain/execute/with_count_test.go index deba255b83..236d0bf8af 100644 --- a/tests/integration/explain/execute/with_count_test.go +++ b/tests/integration/explain/execute/with_count_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithCountOnOneToManyRelation(t 
*testing.T) { @@ -22,7 +23,7 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { Description: "Explain (execute) request with count on one to many relation.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create3BookDocuments(), @@ -30,7 +31,7 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -38,7 +39,7 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -53,9 +54,9 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -68,5 +69,5 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/with_limit_test.go b/tests/integration/explain/execute/with_limit_test.go index f3b48d0948..9a65ec1ec3 100644 --- a/tests/integration/explain/execute/with_limit_test.go +++ b/tests/integration/explain/execute/with_limit_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { @@ -22,19 +23,19 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { Description: "Explain (execute) with both limit and offset on parent.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create3BookDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Book(limit: 1, offset: 1) { name } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -47,9 +48,9 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { "iterations": uint64(2), "filterMatches": uint64(2), "scanNode": dataMap{ - "iterations": uint64(2), - "docFetches": uint64(2), - "filterMatches": uint64(2), + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -61,7 +62,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *testing.T) { @@ -70,7 +71,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t Description: "Explain (execute) with both limit and offset on parent and limit on child.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Articles create3ArticleDocuments(), @@ -78,7 +79,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(limit: 1, offset: 1) { name @@ -88,7 +89,7 @@ func 
TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -103,9 +104,9 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t "typeIndexJoin": dataMap{ "iterations": uint64(2), "scanNode": dataMap{ - "iterations": uint64(2), - "docFetches": uint64(2), - "filterMatches": uint64(2), + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -118,5 +119,5 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/with_order_test.go b/tests/integration/explain/execute/with_order_test.go index ae62f9d218..d5b7ccfaed 100644 --- a/tests/integration/explain/execute/with_order_test.go +++ b/tests/integration/explain/execute/with_order_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { Description: "Explain (execute) with order field on parent.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(order: {age: ASC}) { name @@ -35,7 +36,7 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -48,9 +49,9 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { "filterMatches": uint64(2), "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -62,7 +63,7 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { @@ -71,7 +72,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { Description: "Explain (execute) with multiple order fields on parent.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Authors testUtils.CreateDoc{ @@ -110,7 +111,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { }`, }, - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(order: {age: ASC, name: DESC}) { name @@ -118,7 +119,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -131,9 +132,9 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { "filterMatches": uint64(4), "iterations": uint64(5), "scanNode": dataMap{ - "iterations": uint64(5), - "docFetches": uint64(5), - "filterMatches": uint64(4), + "iterations": uint64(5), + "docFetches": uint64(4), + "fieldFetches": uint64(8), }, }, }, @@ -145,7 +146,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { 
}, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { @@ -154,7 +155,7 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { Description: "Explain (execute) with order field on child.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Articles create3ArticleDocuments(), @@ -162,7 +163,7 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -172,7 +173,7 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -185,9 +186,9 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -199,7 +200,7 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { @@ -208,7 +209,7 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { Description: "Explain (execute) with order field on both parent and child.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Articles create3ArticleDocuments(), @@ -216,7 +217,7 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author(order: {age: ASC}) { name @@ -227,7 +228,7 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -242,9 +243,9 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(4), }, }, }, @@ -257,7 +258,7 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestWhereParentFieldIsOrderedByChildField(t *testing.T) { @@ -266,7 +267,7 @@ func TestExecuteExplainRequestWhereParentFieldIsOrderedByChildField(t *testing.T Description: "Explain (execute) where parent field is ordered by child field.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Articles create3ArticleDocuments(), @@ -274,7 +275,7 @@ func TestExecuteExplainRequestWhereParentFieldIsOrderedByChildField(t *testing.T // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author( order: { @@ -288,35 +289,10 @@ func TestExecuteExplainRequestWhereParentFieldIsOrderedByChildField(t *testing.T } }`, - Results: []dataMap{ - { - "explain": dataMap{ - "executionSuccess": true, - "sizeOfResult": 2, - 
"planExecutions": uint64(3), - "selectTopNode": dataMap{ - "orderNode": dataMap{ - "iterations": uint64(3), - "selectNode": dataMap{ - "iterations": uint64(3), - "filterMatches": uint64(2), - "typeIndexJoin": dataMap{ - "iterations": uint64(3), - "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), - }, - }, - }, - }, - }, - }, - }, - }, + ExpectedError: "Argument \"order\" has invalid value {articles: {pages: ASC}}.\nIn field \"articles\": Unknown field.", }, }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/execute/with_sum_test.go b/tests/integration/explain/execute/with_sum_test.go index b42546ef2b..c6df56c2e0 100644 --- a/tests/integration/explain/execute/with_sum_test.go +++ b/tests/integration/explain/execute/with_sum_test.go @@ -14,6 +14,7 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { @@ -22,12 +23,12 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { Description: "Explain (execute) request with sum on an inline array.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Books create3BookDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Book { name @@ -35,7 +36,7 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -48,9 +49,9 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { "iterations": uint64(4), "filterMatches": uint64(3), "scanNode": dataMap{ - "iterations": uint64(4), - "docFetches": uint64(4), - "filterMatches": uint64(3), + "iterations": uint64(4), + "docFetches": uint64(3), + "fieldFetches": uint64(5), }, }, }, @@ -62,7 +63,7 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { @@ -71,7 +72,7 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { Description: "Explain (execute) request with sum of a related one to many field.", Actions: []any{ - gqlSchemaExecuteExplain(), + explainUtils.SchemaForExplainTests, // Articles create3ArticleDocuments(), @@ -79,7 +80,7 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { // Authors create2AuthorDocuments(), - testUtils.Request{ + testUtils.ExplainRequest{ Request: `query @explain(type: execute) { Author { name @@ -91,7 +92,7 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { } }`, - Results: []dataMap{ + ExpectedFullGraph: []dataMap{ { "explain": dataMap{ "executionSuccess": true, @@ -106,9 +107,9 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { "typeIndexJoin": dataMap{ "iterations": uint64(3), "scanNode": dataMap{ - "iterations": uint64(3), - "docFetches": uint64(3), - "filterMatches": uint64(2), + "iterations": uint64(3), + "docFetches": uint64(2), + "fieldFetches": uint64(2), }, }, }, @@ -121,5 +122,5 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/fixture.go 
b/tests/integration/explain/fixture.go new file mode 100644 index 0000000000..31b819e650 --- /dev/null +++ b/tests/integration/explain/fixture.go @@ -0,0 +1,63 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package test_explain + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +var SchemaForExplainTests = testUtils.SchemaUpdate{ + Schema: (` + type Article { + name: String + author: Author + pages: Int + } + + type Book { + name: String + author: Author + pages: Int + chapterPages: [Int!] + } + + type Author { + name: String + age: Int + verified: Boolean + books: [Book] + articles: [Article] + contact: AuthorContact + } + + type AuthorContact { + cell: String + email: String + author: Author + address: ContactAddress + } + + type ContactAddress { + city: String + country: String + contact: AuthorContact + } + `), +} + +func ExecuteTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + test, + ) +} diff --git a/tests/integration/explain/simple/basic_test.go b/tests/integration/explain/simple/basic_test.go index 4fab76ab81..9920458952 100644 --- a/tests/integration/explain/simple/basic_test.go +++ b/tests/integration/explain/simple/basic_test.go @@ -14,43 +14,44 @@ import ( "testing" testUtils "github.com/sourcenetwork/defradb/tests/integration" + explainUtils "github.com/sourcenetwork/defradb/tests/integration/explain" ) +type dataMap = map[string]any + func TestSimpleExplainRequest(t *testing.T) { - test := testUtils.RequestTestCase{ - Description: "Explain (simple) a basic request.", + test := testUtils.TestCase{ + Description: "Explain (simple) a basic request, assert full graph.", - Request: `query @explain(type: simple) { - Author { - _key - name - age - } - }`, + Actions: []any{ + explainUtils.SchemaForExplainTests, - Docs: map[int][]string{ - 2: { - `{ - "name": "John", - "age": 21 + testUtils.ExplainRequest{ + Request: `query @explain(type: simple) { + Author { + _key + name + age + } }`, - }, - }, - Results: []dataMap{ - { - "explain": dataMap{ - "selectTopNode": dataMap{ - "selectNode": dataMap{ - "filter": nil, - "scanNode": dataMap{ - "filter": nil, - "collectionID": "3", - "collectionName": "Author", - "spans": []dataMap{ - { - "start": "/3", - "end": "/4", + ExpectedFullGraph: []dataMap{ + { + "explain": dataMap{ + "selectTopNode": dataMap{ + "selectNode": dataMap{ + "_keys": nil, + "filter": nil, + "scanNode": dataMap{ + "filter": nil, + "collectionID": "3", + "collectionName": "Author", + "spans": []dataMap{ + { + "start": "/3", + "end": "/4", + }, + }, }, }, }, @@ -61,5 +62,5 @@ func TestSimpleExplainRequest(t *testing.T) { }, } - executeTestCase(t, test) + explainUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/explain/simple/utils.go b/tests/integration/explain/simple/utils.go deleted file mode 100644 index 370f673237..0000000000 --- a/tests/integration/explain/simple/utils.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package test_explain_simple - -import ( - "testing" - - testUtils "github.com/sourcenetwork/defradb/tests/integration" -) - -type dataMap = map[string]any - -var bookAuthorGQLSchema = (` - type Article { - name: String - author: Author - pages: Int - } - - type Book { - name: String - author: Author - pages: Int - chapterPages: [Int!] - } - - type Author { - name: String - age: Int - verified: Boolean - books: [Book] - articles: [Article] - contact: AuthorContact - } - - type AuthorContact { - cell: String - email: String - author: Author - address: ContactAddress - } - - type ContactAddress { - city: String - country: String - contact: AuthorContact - } - -`) - -// TODO: This should be resolved in https://github.com/sourcenetwork/defradb/issues/953. -func executeTestCase(t *testing.T, test testUtils.RequestTestCase) { - testUtils.ExecuteRequestTestCase( - t, - bookAuthorGQLSchema, - []string{"Article", "Book", "Author", "AuthorContact", "AontactAddress"}, - test, - ) -} - -// TODO: This comment is removed in PR that resolves https://github.com/sourcenetwork/defradb/issues/953 -//func executeExplainTestCase(t *testing.T, test explainUtils.ExplainRequestTestCase) { -// explainUtils.ExecuteExplainRequestTestCase( -// t, -// bookAuthorGQLSchema, -// []string{"article", "book", "author", "authorContact", "contactAddress"}, -// test, -// ) -//} diff --git a/tests/integration/index/create_drop_test.go b/tests/integration/index/create_drop_test.go new file mode 100644 index 0000000000..43635116e7 --- /dev/null +++ b/tests/integration/index/create_drop_test.go @@ -0,0 +1,63 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexDrop_ShouldNotHinderQuerying(t *testing.T) { + test := testUtils.TestCase{ + Description: "Drop index should not hinder querying", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String @index + Age: Int + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + Doc: ` + { + "Name": "John", + "Age": 21 + }`, + }, + testUtils.DropIndex{ + CollectionID: 0, + IndexID: 0, + }, + testUtils.Request{ + Request: ` + query { + Users { + Name + Age + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "Age": uint64(21), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/create_get_test.go b/tests/integration/index/create_get_test.go new file mode 100644 index 0000000000..2e758bb637 --- /dev/null +++ b/tests/integration/index/create_get_test.go @@ -0,0 +1,61 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexGet_ShouldReturnListOfExistingIndexes(t *testing.T) { + test := testUtils.TestCase{ + Description: "Getting indexes should return list of existing indexes", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users @index(name: "age_index", fields: ["Age"]) { + Name: String @index(name: "name_index") + Age: Int + } + `, + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{ + { + Name: "name_index", + ID: 1, + Fields: []client.IndexedFieldDescription{ + { + Name: "Name", + Direction: client.Ascending, + }, + }, + }, + { + Name: "age_index", + ID: 2, + Fields: []client.IndexedFieldDescription{ + { + Name: "Age", + Direction: client.Ascending, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/create_test.go b/tests/integration/index/create_test.go new file mode 100644 index 0000000000..15cbac530e --- /dev/null +++ b/tests/integration/index/create_test.go @@ -0,0 +1,131 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package index + +import ( + "testing" + + "github.com/sourcenetwork/defradb/request/graphql/schema" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexCreateWithCollection_ShouldNotHinderQuerying(t *testing.T) { + test := testUtils.TestCase{ + Description: "Creation of index with collection should not hinder querying", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String @index + Age: Int + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + Doc: ` + { + "Name": "John", + "Age": 21 + }`, + }, + testUtils.Request{ + Request: ` + query { + Users { + Name + Age + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "Age": uint64(21), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestIndexCreate_ShouldNotHinderQuerying(t *testing.T) { + test := testUtils.TestCase{ + Description: "Creation of index separately from a collection should not hinder querying", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + Doc: ` + { + "Name": "John", + "Age": 21 + }`, + }, + testUtils.CreateIndex{ + CollectionID: 0, + IndexName: "some_index", + FieldName: "Name", + }, + testUtils.Request{ + Request: ` + query { + Users { + Name + Age + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "Age": uint64(21), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestIndexCreate_IfInvalidIndexName_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "If invalid index name is provided, return error", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.CreateIndex{ + CollectionID: 0, + IndexName: "!", + FieldName: "Name", + ExpectedError: schema.NewErrIndexWithInvalidName("!").Error(), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/drop_test.go b/tests/integration/index/drop_test.go new file mode 100644 index 0000000000..ae2984854d --- /dev/null +++ b/tests/integration/index/drop_test.go @@ -0,0 +1,64 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexDrop_IfIndexDoesNotExist_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "Drop index should return error if index does not exist", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-52b9170d-b77a-5887-b877-cbdbb99b009f + Doc: ` + { + "Name": "John", + "Age": 21 + }`, + }, + testUtils.DropIndex{ + CollectionID: 0, + IndexName: "non_existing_index", + ExpectedError: "index with name doesn't exists. 
Name: non_existing_index", + }, + testUtils.Request{ + Request: ` + query { + Users { + Name + Age + } + }`, + Results: []map[string]any{ + { + "Name": "John", + "Age": uint64(21), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/get_test.go b/tests/integration/index/get_test.go new file mode 100644 index 0000000000..09308a51cf --- /dev/null +++ b/tests/integration/index/get_test.go @@ -0,0 +1,40 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestIndexGet_IfThereAreNoIndexes_ReturnEmptyList(t *testing.T) { + test := testUtils.TestCase{ + Description: "Getting indexes should return empty list if there are no indexes", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.GetIndexes{ + CollectionID: 0, + ExpectedIndexes: []client.IndexDescription{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/lens.go b/tests/integration/lens.go new file mode 100644 index 0000000000..dbdb4c1c70 --- /dev/null +++ b/tests/integration/lens.go @@ -0,0 +1,81 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "github.com/sourcenetwork/immutable" + "github.com/stretchr/testify/assert" + + "github.com/sourcenetwork/defradb/client" +) + +// ConfigureMigration is a test action which will configure a Lens migration using the +// provided configuration. +type ConfigureMigration struct { + // NodeID is the node ID (index) of the node in which to configure the migration. + NodeID immutable.Option[int] + + // Used to identify the transaction for this to run against. Optional. + TransactionID immutable.Option[int] + + // The configuration to use. + // + // Paths to WASM Lens modules may be found in: github.com/sourcenetwork/defradb/tests/lenses + client.LensConfig + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// GetMigrations is a test action which will fetch and assert on the results of calling +// `LensRegistry().Config()`. +type GetMigrations struct { + // NodeID is the node ID (index) of the node in which to configure the migration. + NodeID immutable.Option[int] + + // Used to identify the transaction for this to run against. Optional. + TransactionID immutable.Option[int] + + // The expected configuration. 
+ ExpectedResults []client.LensConfig +} + +func configureMigration( + s *state, + action ConfigureMigration, +) { + for _, node := range getNodes(action.NodeID, s.nodes) { + db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + + err := db.SetMigration(s.ctx, action.LensConfig) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + } +} + +func getMigrations( + s *state, + action GetMigrations, +) { + for _, node := range getNodes(action.NodeID, s.nodes) { + db := getStore(s, node.DB, action.TransactionID, "") + + configs := db.LensRegistry().Config() + + // The order of the results is not deterministic, so do not assert on the element + // locations. + assert.ElementsMatch(s.t, configs, action.ExpectedResults) + } +} diff --git a/tests/integration/mutation/one_to_many/create/with_alias_test.go b/tests/integration/mutation/one_to_many/create/with_alias_test.go new file mode 100644 index 0000000000..990ae8f653 --- /dev/null +++ b/tests/integration/mutation/one_to_many/create/with_alias_test.go @@ -0,0 +1,269 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package create + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_many" +) + +func TestMutationCreateOneToMany_AliasedRelationNameWithInvalidField_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, with an invalid field, with alias.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"notName\": \"Painted House\",\"author\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_AliasedRelationNameNonExistingRelationSingleSide_NoIDFieldError(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, non-existing id, from the single side, no id relation field, with alias.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published\": \"bae--b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: published", + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as it contains a +// reference to a document that doesnt exist. 
+func TestMutationCreateOneToMany_AliasedRelationNameNonExistingRelationManySide_CreatedDoc(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, non-existing id, from the many side, with alias", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_AliasedRelationNamToLinkFromSingleSide_NoIDFieldError(t *testing.T) { + bookKey := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + + test := testUtils.TestCase{ + Description: "One to many create mutation with relation id from single side, with alias.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": bookKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published\": \"%s\"}") { + name + } + }`, + bookKey, + ), + ExpectedError: "The given field does not exist. Name: published", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_AliasedRelationNameToLinkFromManySide(t *testing.T) { + authorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + + test := testUtils.TestCase{ + Description: "One to many create mutation using relation id from many side, with alias.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": authorKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + name + } + }`, + authorKey, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToMany_AliasRelationNameAndInternalIDBothProduceSameDocID(t *testing.T) { + // These keys MUST be shared by both tests below. + authorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + nonAliasedTest := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from single side (wrong)", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": authorKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + authorKey, + ), + Results: []map[string]any{ + { + "_key": bookKey, // Must be same as below. 
+ "name": "Painted House", + }, + }, + }, + }, + } + fixture.ExecuteTestCase(t, nonAliasedTest) + + // Check that `bookKey` is same in both above and the alised version below. + // Note: Everything should be same, only diff should be the use of alias. + + aliasedTest := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from single side (wrong)", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": authorKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + authorKey, + ), + Results: []map[string]any{ + { + "_key": bookKey, // Must be same as below. + "name": "Painted House", + }, + }, + }, + }, + } + fixture.ExecuteTestCase(t, aliasedTest) +} diff --git a/tests/integration/mutation/one_to_many/create/with_simple_test.go b/tests/integration/mutation/one_to_many/create/with_simple_test.go new file mode 100644 index 0000000000..a2c3ad3545 --- /dev/null +++ b/tests/integration/mutation/one_to_many/create/with_simple_test.go @@ -0,0 +1,188 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package create + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_many" +) + +func TestMutationCreateOneToMany_WithInvalidField_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, with an invalid field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"notName\": \"Painted House\",\"author_id\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_NonExistingRelationSingleSide_NoIDFieldError(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, non-existing id, from the single side, no id relation field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published_id\": \"bae--b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: published_id", + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as it contains a +// reference to a document that doesnt exist. 
+func TestMutationCreateOneToMany_NonExistingRelationManySide_CreatedDoc(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to many create mutation, non-existing id, from the many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + } + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_RelationIDToLinkFromSingleSide_NoIDFieldError(t *testing.T) { + bookKey := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + + test := testUtils.TestCase{ + Description: "One to many create mutation with relation id from single side.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": bookKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published_id\": \"%s\"}") { + name + } + }`, + bookKey, + ), + ExpectedError: "The given field does not exist. Name: published_id", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { + authorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + + test := testUtils.TestCase{ + Description: "One to many create mutation using relation id from many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": authorKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + name + } + }`, + authorKey, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/one_to_many/delete/with_show_deleted_test.go b/tests/integration/mutation/one_to_many/delete/with_show_deleted_test.go index 009ac1d197..9b97fb3a0f 100644 --- a/tests/integration/mutation/one_to_many/delete/with_show_deleted_test.go +++ b/tests/integration/mutation/one_to_many/delete/with_show_deleted_test.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_many" ) func TestDeletionOfADocumentUsingSingleKeyWithShowDeletedDocumentQuery(t *testing.T) { @@ -45,22 +46,8 @@ func TestDeletionOfADocumentUsingSingleKeyWithShowDeletedDocumentQuery(t *testin // require.NoError(t, err) test := testUtils.TestCase{ + Description: "One to many delete document using single key show deleted.", Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Book { - name: String - rating: Float - author: Author - } - - type Author { - name: String - age: Int - 
published: [Book] - } - `, - }, testUtils.CreateDoc{ CollectionID: 1, Doc: jsonString1, @@ -121,5 +108,5 @@ func TestDeletionOfADocumentUsingSingleKeyWithShowDeletedDocumentQuery(t *testin }, } - testUtils.ExecuteTestCase(t, []string{"Book", "Author"}, test) + fixture.ExecuteTestCase(t, test) } diff --git a/tests/integration/mutation/one_to_many/update/related_object_link_test.go b/tests/integration/mutation/one_to_many/update/related_object_link_test.go new file mode 100644 index 0000000000..5c9f235b1a --- /dev/null +++ b/tests/integration/mutation/one_to_many/update/related_object_link_test.go @@ -0,0 +1,359 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package update + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_many" +) + +func TestMutationUpdateOneToMany_RelationIDToLinkFromSingleSide_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation id from single side (wrong)", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ // NOTE: There is no `published_id` on book. + Request: fmt.Sprintf( + `mutation { + update_Author(id: "%s", data: "{\"published_id\": \"%s\"}") { + name + } + }`, + author2Key, + bookKey, + ), + ExpectedError: "The given field does not exist. Name: published_id", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as it contains a +// reference to a document that doesnt exist. 
+func TestMutationUpdateOneToMany_InvalidRelationIDToLinkFromManySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidAuthorKey := "bae-35953ca-518d-9e6b-9ce6cd00eff5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation id from many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{}, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": nil, // Linked to incorrect id + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToMany_RelationIDToLinkFromManySideWithWrongField_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation id from many side, with a wrong field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"notName\": \"Unpainted Condo\",\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + ExpectedError: "The given field does not exist. 
Name: notName", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToMany_RelationIDToLinkFromManySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation id from many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{}, + }, + { + "name": "New Shahzad", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "New Shahzad", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/one_to_many/update/related_object_link_with_alias_test.go b/tests/integration/mutation/one_to_many/update/related_object_link_with_alias_test.go new file mode 100644 index 0000000000..f579796e96 --- /dev/null +++ b/tests/integration/mutation/one_to_many/update/related_object_link_with_alias_test.go @@ -0,0 +1,359 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package update + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_many" +) + +func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromSingleSide_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from single side (wrong)", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ // NOTE: There is no `published_id` and so `published` alias is invalid use on book. + Request: fmt.Sprintf( + `mutation { + update_Author(id: "%s", data: "{\"published\": \"%s\"}") { + name + } + }`, + author2Key, + bookKey, + ), + ExpectedError: "The given field or alias to field does not exist. Name: published", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as it contains a +// reference to a document that doesnt exist. +func TestMutationUpdateOneToMany_InvalidAliasRelationNameToLinkFromManySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidAuthorKey := "bae-35953ca-518d-9e6b-9ce6cd00eff5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{}, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": nil, // Linked to incorrect id + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySideWithWrongField_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := 
"bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from many side, with a wrong field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"notName\": \"Unpainted Condo\",\"author\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToMany_AliasRelationNameToLinkFromManySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to many update mutation using relation alias name from many side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": []map[string]any{}, + }, + { + "name": "New Shahzad", + "published": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "New Shahzad", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/one_to_many/utils.go b/tests/integration/mutation/one_to_many/utils.go new file mode 100644 index 0000000000..2b81a7321e --- /dev/null +++ b/tests/integration/mutation/one_to_many/utils.go @@ -0,0 +1,46 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func ExecuteTestCase(t *testing.T, test testUtils.TestCase) { + testUtils.ExecuteTestCase( + t, + testUtils.TestCase{ + Description: test.Description, + Actions: append( + []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + published: [Book] + } + `, + }, + }, + test.Actions..., + ), + }, + ) +} diff --git a/tests/integration/mutation/one_to_one/create/with_alias_test.go b/tests/integration/mutation/one_to_one/create/with_alias_test.go new file mode 100644 index 0000000000..9543795ead --- /dev/null +++ b/tests/integration/mutation/one_to_one/create/with_alias_test.go @@ -0,0 +1,226 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package create + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + simpleTests "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_one" +) + +func TestMutationCreateOneToOne_UseAliasWithInvalidField_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one create mutation, alias relation, with an invalid field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"notName\": \"John Grisham\",\"published\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + simpleTests.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as it contains a +// reference to a document that doesnt exist. 
+func TestMutationCreateOneToOne_UseAliasWithNonExistingRelationPrimarySide_CreatedDoc(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one create mutation, alias relation, from the wrong side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + }, + }, + }, + }, + } + simpleTests.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToOne_UseAliasWithNonExistingRelationSecondarySide_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one create mutation, alias relation, from the secondary side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "no document for the given key exists", + }, + }, + } + simpleTests.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromPrimarySide(t *testing.T) { + bookKey := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + + test := testUtils.TestCase{ + Description: "One to one create mutation with an alias relation.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": bookKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Author(data: "{\"name\": \"John Grisham\",\"published\": \"%s\"}") { + name + } + }`, + bookKey, + ), + Results: []map[string]any{ + { + "name": "John Grisham", + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + }, + } + + simpleTests.ExecuteTestCase(t, test) +} + +func TestMutationCreateOneToOne_UseAliasedRelationNameToLink_QueryFromSecondarySide(t *testing.T) { + authorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + + test := testUtils.TestCase{ + Description: "One to one create mutation from secondary side with alias relation.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": authorKey, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + name + } + }`, + authorKey, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + simpleTests.ExecuteTestCase(t, test) +} diff --git 
a/tests/integration/mutation/one_to_one/create/with_simple_test.go b/tests/integration/mutation/one_to_one/create/with_simple_test.go index 16c9373ce6..bc37265dd2 100644 --- a/tests/integration/mutation/one_to_one/create/with_simple_test.go +++ b/tests/integration/mutation/one_to_one/create/with_simple_test.go @@ -18,6 +18,23 @@ import ( simpleTests "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_one" ) +func TestMutationCreateOneToOne_WithInvalidField_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one create mutation, with an invalid field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"notName\": \"John Grisham\",\"published_id\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + simpleTests.ExecuteTestCase(t, test) +} + // Note: This test should probably not pass, as it contains a // reference to a document that doesnt exist. func TestMutationCreateOneToOneNoChild(t *testing.T) { @@ -41,6 +58,23 @@ func TestMutationCreateOneToOneNoChild(t *testing.T) { simpleTests.ExecuteTestCase(t, test) } +func TestMutationCreateOneToOne_NonExistingRelationSecondarySide_Error(t *testing.T) { + test := testUtils.TestCase{ + Description: "One to one create mutation, from the secondary side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"bae-fd541c25-229e-5280-b44b-e5c2af3e374d\"}") { + name + } + }`, + ExpectedError: "no document for the given key exists", + }, + }, + } + simpleTests.ExecuteTestCase(t, test) +} + func TestMutationCreateOneToOne(t *testing.T) { bookKey := "bae-3d236f89-6a31-5add-a36a-27971a2eac76" diff --git a/tests/integration/mutation/one_to_one/update/related_object_link_test.go b/tests/integration/mutation/one_to_one/update/related_object_link_test.go new file mode 100644 index 0000000000..5928b591fb --- /dev/null +++ b/tests/integration/mutation/one_to_one/update/related_object_link_test.go @@ -0,0 +1,437 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package update + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_one" +) + +// Note: This test should probably not pass, as even after updating a link to a new document +// from one side the previous link still remains on the other side of the link. 
+func TestMutationUpdateOneToOne_RelationIDToLinkFromPrimarySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using relation id from single side (wrong)", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Author(id: "%s", data: "{\"published_id\": \"%s\"}") { + name + } + }`, + author2Key, + bookKey, + ), + Results: []map[string]any{ + { + "name": "New Shahzad", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + { + "name": "New Shahzad", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as even after updating a link to a new document +// from one side the previous link still remains on the other side of the link. 
+func TestMutationUpdateOneToOne_RelationIDToLinkFromSecondarySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using relation id from secondary side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + { + "name": "New Shahzad", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_InvalidLengthRelationIDToLink_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidLenSubKey := "35953ca-518d-9e6b-9ce6cd00eff5" + invalidAuthorKey := "bae-" + invalidLenSubKey + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using invalid relation id", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + ExpectedError: "uuid: incorrect UUID length 30 in string \"" + invalidLenSubKey + "\"", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_InvalidRelationIDToLinkFromSecondarySide_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidAuthorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ee" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using relation id from secondary side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: 
"{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + ExpectedError: "no document for the given key exists", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_RelationIDToLinkFromSecondarySideWithWrongField_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using relation id from secondary side, with a wrong field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author_id\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"notName\": \"Unpainted Condo\",\"author_id\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/one_to_one/update/related_object_link_with_alias_test.go b/tests/integration/mutation/one_to_one/update/related_object_link_with_alias_test.go new file mode 100644 index 0000000000..271edf26d1 --- /dev/null +++ b/tests/integration/mutation/one_to_one/update/related_object_link_with_alias_test.go @@ -0,0 +1,427 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package update + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" + fixture "github.com/sourcenetwork/defradb/tests/integration/mutation/one_to_one" +) + +// Note: This test should probably not pass, as even after updating a link to a new document +// from one side the previous link still remains on the other side of the link. 
+func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromPrimarySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using alias relation id from single side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Author(id: "%s", data: "{\"published\": \"%s\"}") { + name + } + }`, + author2Key, + bookKey, + ), + Results: []map[string]any{ + { + "name": "New Shahzad", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + { + "name": "New Shahzad", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +// Note: This test should probably not pass, as even after updating a link to a new document +// from one side the previous link still remains on the other side of the link. 
+func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromSecondarySide(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using alias relation id from secondary side", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + Results: []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: `query { + Author { + name + published { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "John Grisham", + "published": map[string]any{ + "name": "Painted House", + }, + }, + { + "name": "New Shahzad", + "published": map[string]any{ + "name": "Painted House", + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Book { + name + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_AliasWithInvalidLengthRelationIDToLink_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidLenSubKey := "35953ca-518d-9e6b-9ce6cd00eff5" + invalidAuthorKey := "bae-" + invalidLenSubKey + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using invalid alias relation id", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + ExpectedError: "uuid: incorrect UUID length 30 in string \"" + invalidLenSubKey + "\"", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_InvalidAliasRelationNameToLinkFromSecondarySide_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + invalidAuthorKey := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ee" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using alias relation id from secondary side", + Actions: []any{ + testUtils.Request{ + Request: 
`mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"author\": \"%s\"}") { + name + } + }`, + bookKey, + invalidAuthorKey, + ), + ExpectedError: "no document for the given key exists", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_AliasRelationNameToLinkFromSecondarySideWithWrongField_Error(t *testing.T) { + author1Key := "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + author2Key := "bae-35953caf-4898-518d-9e6b-9ce6cd86ebe5" + bookKey := "bae-22e0a1c2-d12b-5bfd-b039-0cf72f963991" + + test := testUtils.TestCase{ + Description: "One to one update mutation using relation alias name from secondary side, with a wrong field.", + Actions: []any{ + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"John Grisham\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author1Key, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + create_Author(data: "{\"name\": \"New Shahzad\"}") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": author2Key, + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + create_Book(data: "{\"name\": \"Painted House\",\"author\": \"%s\"}") { + _key + name + } + }`, + author1Key, + ), + Results: []map[string]any{ + { + "_key": bookKey, + "name": "Painted House", + }, + }, + }, + testUtils.Request{ + Request: fmt.Sprintf( + `mutation { + update_Book(id: "%s", data: "{\"notName\": \"Unpainted Condo\",\"author\": \"%s\"}") { + name + } + }`, + bookKey, + author2Key, + ), + ExpectedError: "The given field does not exist. Name: notName", + }, + }, + } + + fixture.ExecuteTestCase(t, test) +} diff --git a/tests/integration/mutation/one_to_one/utils.go b/tests/integration/mutation/one_to_one/utils.go index ae795d670c..f960b3ba2c 100644 --- a/tests/integration/mutation/one_to_one/utils.go +++ b/tests/integration/mutation/one_to_one/utils.go @@ -19,7 +19,6 @@ import ( func ExecuteTestCase(t *testing.T, test testUtils.TestCase) { testUtils.ExecuteTestCase( t, - []string{"Book", "Author"}, testUtils.TestCase{ Description: test.Description, Actions: append( diff --git a/tests/integration/mutation/relation/create/with_txn_test.go b/tests/integration/mutation/relation/create/with_txn_test.go index ccb04717b4..d48df2dfd4 100644 --- a/tests/integration/mutation/relation/create/with_txn_test.go +++ b/tests/integration/mutation/relation/create/with_txn_test.go @@ -13,8 +13,9 @@ package relation_create import ( "testing" - testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" relationTests "github.com/sourcenetwork/defradb/tests/integration/mutation/relation" ) @@ -39,8 +40,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }`, }, // Create books related to publishers, and ensure they are correctly linked (in and out of transactions). 
- testUtils.TransactionRequest2{ - TransactionID: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { create_Book(data: "{\"name\": \"Book By Website\",\"rating\": 4.0, \"publisher_id\": \"bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4\"}") { _key @@ -52,8 +53,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }, }, }, - testUtils.TransactionRequest2{ - TransactionID: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `mutation { create_Book(data: "{\"name\": \"Book By Online\",\"rating\": 4.0, \"publisher_id\": \"bae-8a381044-9206-51e7-8bc8-dc683d5f2523\"}") { _key @@ -66,8 +67,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }, }, // Assert publisher -> books direction within transaction 0. - testUtils.TransactionRequest2{ - TransactionID: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `query { Publisher { _key @@ -96,8 +97,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsForward(t *testing. }, }, // Assert publisher -> books direction within transaction 1. - testUtils.TransactionRequest2{ - TransactionID: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `query { Publisher { _key @@ -191,8 +192,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }`, }, // Create books related to publishers, and ensure they are correctly linked (in and out of transactions). - testUtils.TransactionRequest2{ - TransactionID: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { create_Book(data: "{\"name\": \"Book By Website\",\"rating\": 4.0, \"publisher_id\": \"bae-0e7c3bb5-4917-5d98-9fcf-b9db369ea6e4\"}") { _key @@ -204,8 +205,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }, }, }, - testUtils.TransactionRequest2{ - TransactionID: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `mutation { create_Book(data: "{\"name\": \"Book By Online\",\"rating\": 4.0, \"publisher_id\": \"bae-8a381044-9206-51e7-8bc8-dc683d5f2523\"}") { _key @@ -218,8 +219,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }, }, // Assert publisher -> books direction within transaction 0. - testUtils.TransactionRequest2{ - TransactionID: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `query { Book { _key @@ -242,8 +243,8 @@ func TestTransactionalCreationAndLinkingOfRelationalDocumentsBackward(t *testing }, }, // Assert publisher -> books direction within transaction 1. 
- testUtils.TransactionRequest2{ - TransactionID: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `query { Book { _key diff --git a/tests/integration/mutation/relation/delete/with_txn_test.go b/tests/integration/mutation/relation/delete/with_txn_test.go index 955bddab70..aa5491417f 100644 --- a/tests/integration/mutation/relation/delete/with_txn_test.go +++ b/tests/integration/mutation/relation/delete/with_txn_test.go @@ -13,8 +13,9 @@ package relation_delete import ( "testing" - testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" relationTests "github.com/sourcenetwork/defradb/tests/integration/mutation/relation" ) @@ -41,9 +42,9 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideForwardDirection(t *testing.T) { "address": "Manning Publications" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a linked book that exists. - TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Book(id: "bae-5b16ccd7-9cae-5145-a56c-03cfe7787722") { _key @@ -107,9 +108,9 @@ func TestTxnDeletionOfRelatedDocFromPrimarySideBackwardDirection(t *testing.T) { "address": "Manning Publications" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a linked book that exists. - TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Book(id: "bae-5b16ccd7-9cae-5145-a56c-03cfe7787722") { _key @@ -167,9 +168,9 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes "address": "Manning Publications" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a linked book that exists. - TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Book(id: "bae-5b16ccd7-9cae-5145-a56c-03cfe7787722") { _key @@ -181,9 +182,9 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnForwardDirection(t *tes }, }, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Read the book (forward) that was deleted (in the non-commited transaction) in another transaction. - TransactionID: 1, + TransactionID: immutable.Some(1), Request: `query { Publisher { _key @@ -257,9 +258,9 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnBackwardDirection(t *te "address": "Manning Publications" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a linked book that exists in transaction 0. - TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Book(id: "bae-5b16ccd7-9cae-5145-a56c-03cfe7787722") { _key @@ -271,9 +272,9 @@ func TestATxnCanReadARecordThatIsDeletedInANonCommitedTxnBackwardDirection(t *te }, }, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Read the book (backwards) that was deleted (in the non-commited transaction) in another transaction. - TransactionID: 1, + TransactionID: immutable.Some(1), Request: `query { Book { _key @@ -341,10 +342,10 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideForwardDirection(t *testing.T) "address": "Manning Early Access Program (MEAP)" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a publisher and outside the transaction ensure it's linked // book gets correctly unlinked too. 
- TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Publisher(id: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523") { _key @@ -402,10 +403,10 @@ func TestTxnDeletionOfRelatedDocFromNonPrimarySideBackwardDirection(t *testing.T "address": "Manning Early Access Program (MEAP)" }`, }, - testUtils.TransactionRequest2{ + testUtils.Request{ // Delete a publisher and outside the transaction ensure it's linked // book gets correctly unlinked too. - TransactionID: 0, + TransactionID: immutable.Some(0), Request: `mutation { delete_Publisher(id: "bae-8a381044-9206-51e7-8bc8-dc683d5f2523") { _key diff --git a/tests/integration/mutation/relation/utils.go b/tests/integration/mutation/relation/utils.go index c4d7f583bb..610124122c 100644 --- a/tests/integration/mutation/relation/utils.go +++ b/tests/integration/mutation/relation/utils.go @@ -19,7 +19,6 @@ import ( func Execute(t *testing.T, test testUtils.TestCase) { testUtils.ExecuteTestCase( t, - []string{"Book", "Author", "Publisher"}, testUtils.TestCase{ Description: test.Description, Actions: append( diff --git a/tests/integration/mutation/simple/create/simple_test.go b/tests/integration/mutation/simple/create/simple_test.go index cc650f910b..b173611f7f 100644 --- a/tests/integration/mutation/simple/create/simple_test.go +++ b/tests/integration/mutation/simple/create/simple_test.go @@ -50,7 +50,7 @@ func TestMutationCreateSimpleErrorsGivenNonExistantField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestMutationCreateSimple(t *testing.T) { @@ -93,7 +93,7 @@ func TestMutationCreateSimpleDoesNotCreateDocGivenDuplicate(t *testing.T) { }`, }, }, - ExpectedError: "a document with the given dockey already exists", + ExpectedError: "a document with the given dockey already exists. 
DocKey: ", } simpleTests.ExecuteTestCase(t, test) diff --git a/tests/integration/mutation/simple/delete/multi_ids_test.go b/tests/integration/mutation/simple/delete/multi_ids_test.go index ba7dd98ba3..80585d5d79 100644 --- a/tests/integration/mutation/simple/delete/multi_ids_test.go +++ b/tests/integration/mutation/simple/delete/multi_ids_test.go @@ -13,51 +13,64 @@ package delete import ( "testing" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" simpleTests "github.com/sourcenetwork/defradb/tests/integration/mutation/simple" ) -func TestDeletionOfMultipleDocumentUsingMultipleKeys_Success(t *testing.T) { - tests := []testUtils.RequestTestCase{ - - { - Description: "Simple multi-key delete mutation with one key that exists.", - Docs: map[int][]string{ - 0: { - `{ - "name": "Shahzad", - "age": 26, - "points": 48.48, - "verified": true - }`, - }, +func TestDeletionOfMultipleDocumentUsingMultipleKeysWhereOneExists(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple multi-key delete mutation with one key that exists.", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + points: Float + verified: Boolean + } + `, }, - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, - Request: `mutation { - delete_User(ids: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { - _key - } - }`, - Results: []map[string]any{ - { - "_key": "bae-6a6482a8-24e1-5c73-a237-ca569e41507d", - }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Shahzad", + "age": 26, + "points": 48.48, + "verified": true + }`, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), + Request: `mutation { + delete_User(ids: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { + _key + } + }`, + Results: []map[string]any{ + { + "_key": "bae-6a6482a8-24e1-5c73-a237-ca569e41507d", }, }, - { - TransactionId: 0, - Request: `query { - User(dockeys: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { - _key - } - }`, - Results: []map[string]any{}, - }, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), + Request: `query { + User(dockeys: ["bae-6a6482a8-24e1-5c73-a237-ca569e41507d"]) { + _key + } + }`, + Results: []map[string]any{}, }, }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestDeletionOfMultipleDocumentUsingMultipleKeys_Success(t *testing.T) { + tests := []testUtils.RequestTestCase{ { Description: "Delete multiple documents that exist, when given multiple keys.", Request: `mutation { @@ -403,7 +416,7 @@ func TestDeletionOfMultipleDocumentsUsingSingleKeyWithShowDeletedDocumentQuery_S }, } - testUtils.ExecuteTestCase(t, []string{"User"}, test) + testUtils.ExecuteTestCase(t, test) } func TestDeletionOfMultipleDocumentsUsingEmptySet(t *testing.T) { @@ -461,5 +474,5 @@ func TestDeletionOfMultipleDocumentsUsingEmptySet(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"User"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/mutation/simple/delete/single_id_test.go b/tests/integration/mutation/simple/delete/single_id_test.go index c7583a3936..a3ec777233 100644 --- a/tests/integration/mutation/simple/delete/single_id_test.go +++ b/tests/integration/mutation/simple/delete/single_id_test.go @@ -13,53 +13,66 @@ package delete import ( "testing" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" simpleTests "github.com/sourcenetwork/defradb/tests/integration/mutation/simple" ) -func 
TestDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { - tests := []testUtils.RequestTestCase{ - - { - Description: "Simple delete mutation where one element exists.", - Docs: map[int][]string{ - 0: { - `{ - "name": "Shahzad", - "age": 26, - "points": 48.5, - "verified": true - }`, - }, +func TestDeletionOfADocumentUsingSingleKeyWhereDocExists(t *testing.T) { + test := testUtils.TestCase{ + Description: "Simple delete mutation where one element exists.", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + points: Float + verified: Boolean + } + `, }, - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, - Request: `mutation { - delete_User(id: "bae-8ca944fd-260e-5a44-b88f-326d9faca810") { - _key - } - }`, - Results: []map[string]any{ - { - "_key": "bae-8ca944fd-260e-5a44-b88f-326d9faca810", - }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Shahzad", + "age": 26, + "points": 48.5, + "verified": true + }`, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), + Request: `mutation { + delete_User(id: "bae-8ca944fd-260e-5a44-b88f-326d9faca810") { + _key + } + }`, + Results: []map[string]any{ + { + "_key": "bae-8ca944fd-260e-5a44-b88f-326d9faca810", }, }, - { - TransactionId: 0, - Request: `query { - User(dockey: "bae-8ca944fd-260e-5a44-b88f-326d9faca810") { - _key - } - }`, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), + Request: `query { + User(dockey: "bae-8ca944fd-260e-5a44-b88f-326d9faca810") { + _key + } + }`, - // explicitly empty - Results: []map[string]any{}, - }, + // explicitly empty + Results: []map[string]any{}, }, }, + } + + testUtils.ExecuteTestCase(t, test) +} +func TestDeletionOfADocumentUsingSingleKey_Success(t *testing.T) { + tests := []testUtils.RequestTestCase{ { Description: "Simple delete mutation with an aliased _key name.", Docs: map[int][]string{ @@ -265,5 +278,5 @@ func TestDeletionOfADocumentUsingSingleKeyWithShowDeletedDocumentQuery_Success(t }, } - testUtils.ExecuteTestCase(t, []string{"User"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/mutation/simple/delete/with_filter_test.go b/tests/integration/mutation/simple/delete/with_filter_test.go index f21674d7e4..6c934bfd5f 100644 --- a/tests/integration/mutation/simple/delete/with_filter_test.go +++ b/tests/integration/mutation/simple/delete/with_filter_test.go @@ -414,5 +414,5 @@ func TestDeletionOfDocumentsWithFilterWithShowDeletedDocumentQuery_Success(t *te }, } - testUtils.ExecuteTestCase(t, []string{"User"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/mutation/simple/mix/with_txn_test.go b/tests/integration/mutation/simple/mix/with_txn_test.go index fe4f7f0031..a2d1e9b08e 100644 --- a/tests/integration/mutation/simple/mix/with_txn_test.go +++ b/tests/integration/mutation/simple/mix/with_txn_test.go @@ -13,16 +13,26 @@ package mix import ( "testing" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" simpleTests "github.com/sourcenetwork/defradb/tests/integration/mutation/simple" ) func TestMutationWithTxnDeletesUserGivenSameTransaction(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Create followed by delete in same transaction", - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + } + `, + }, + testUtils.Request{ + 
TransactionID: immutable.Some(0), Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27}") { _key @@ -34,8 +44,8 @@ func TestMutationWithTxnDeletesUserGivenSameTransaction(t *testing.T) { }, }, }, - { - TransactionId: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { delete_User(id: "bae-88b63198-7d38-5714-a9ff-21ba46374fd1") { _key @@ -50,15 +60,23 @@ func TestMutationWithTxnDeletesUserGivenSameTransaction(t *testing.T) { }, } - simpleTests.ExecuteTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Create followed by delete on 2nd transaction", - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + } + `, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { create_User(data: "{\"name\": \"John\",\"age\": 27}") { _key @@ -70,8 +88,8 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. }, }, }, - { - TransactionId: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `mutation { delete_User(id: "bae-88b63198-7d38-5714-a9ff-21ba46374fd1") { _key @@ -79,8 +97,8 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. }`, Results: []map[string]any{}, }, - { - TransactionId: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `query { User { _key @@ -96,8 +114,8 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. }, }, }, - { - TransactionId: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `query { User { _key @@ -110,23 +128,29 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. 
}, } - simpleTests.ExecuteTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Update followed by read in same transaction", - Docs: map[int][]string{ - 0: { - `{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ "name": "John", "age": 27 }`, }, - }, - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { update_User(data: "{\"age\": 28}") { _key @@ -138,8 +162,8 @@ func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { }, }, }, - { - TransactionId: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `query { User { _key @@ -158,23 +182,29 @@ func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { }, } - simpleTests.ExecuteTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T) { - test := testUtils.RequestTestCase{ + test := testUtils.TestCase{ Description: "Update followed by read in different transaction", - Docs: map[int][]string{ - 0: { - `{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ "name": "John", "age": 27 }`, }, - }, - TransactionalRequests: []testUtils.TransactionRequest{ - { - TransactionId: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { update_User(data: "{\"age\": 28}") { _key @@ -190,8 +220,8 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T }, }, }, - { - TransactionId: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `query { User { _key @@ -210,7 +240,7 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T }, } - simpleTests.ExecuteTestCase(t, test) + testUtils.ExecuteTestCase(t, test) } func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) { @@ -224,8 +254,8 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) "age": 27 }`, }, - testUtils.TransactionRequest2{ - TransactionID: 0, + testUtils.Request{ + TransactionID: immutable.Some(0), Request: `mutation { update_User(data: "{\"age\": 28}") { _key @@ -241,8 +271,8 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) }, }, }, - testUtils.TransactionRequest2{ - TransactionID: 1, + testUtils.Request{ + TransactionID: immutable.Some(1), Request: `mutation { update_User(data: "{\"age\": 29}") { _key diff --git a/tests/integration/mutation/simple/utils.go b/tests/integration/mutation/simple/utils.go index 3c8b2078e3..c45c7340be 100644 --- a/tests/integration/mutation/simple/utils.go +++ b/tests/integration/mutation/simple/utils.go @@ -32,7 +32,6 @@ func ExecuteTestCase(t *testing.T, test testUtils.RequestTestCase) { func Execute(t *testing.T, test testUtils.TestCase) { testUtils.ExecuteTestCase( t, - []string{"User"}, testUtils.TestCase{ Description: test.Description, Actions: append( diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 99b89d8c7a..83d01743b9 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -25,8 +25,9 @@ import ( coreDB 
"github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/net" + netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" - "github.com/sourcenetwork/defradb/node" testutils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -53,11 +54,11 @@ type P2PTestCase struct { // Configuration parameters for each peer NodeConfig []*config.Config - // List of peers for each node. + // List of peers for each net. // Only peers with lower index than the node can be used in the list of peers. NodePeers map[int][]int - // List of replicators for each node. + // List of replicators for each net. // Only peers with lower index than the node can be used in the list of peers. NodeReplicators map[int][]int @@ -70,7 +71,7 @@ type P2PTestCase struct { ReplicatorResult map[int]map[string]map[string]any } -func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*node.Node, []client.DocKey, error) { +func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node, []client.DocKey, error) { ctx := context.Background() log.Info(ctx, "Building new memory store") @@ -91,13 +92,13 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*node.Nod dockeys = append(dockeys, dockey) } - // init the p2p node - var n *node.Node + // init the P2P node + var n *net.Node log.Info(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = node.NewNode( + n, err = net.NewNode( ctx, db, - cfg.NodeConfig(), + net.WithConfig(cfg), ) if err != nil { return nil, nil, errors.Wrap("failed to start P2P node", err) @@ -195,7 +196,7 @@ func executeTestCase(t *testing.T, test P2PTestCase) { ctx := context.Background() dockeys := []client.DocKey{} - nodes := []*node.Node{} + nodes := []*net.Node{} for i, cfg := range test.NodeConfig { log.Info(ctx, fmt.Sprintf("Setting up node %d", i)) @@ -304,7 +305,12 @@ func executeTestCase(t *testing.T, test P2PTestCase) { fmt.Sprintf("%s/p2p/%s", test.NodeConfig[r].Net.P2PAddress, nodes[r].PeerID()), ) require.NoError(t, err) - _, err = n.Peer.SetReplicator(ctx, addr) + _, err = n.Peer.SetReplicator( + ctx, + &netpb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) require.NoError(t, err) } } diff --git a/tests/integration/net/state/one_to_many/peer/with_create_update_test.go b/tests/integration/net/state/one_to_many/peer/with_create_update_test.go index c81fbbc46a..a7451fc956 100644 --- a/tests/integration/net/state/one_to_many/peer/with_create_update_test.go +++ b/tests/integration/net/state/one_to_many/peer/with_create_update_test.go @@ -113,5 +113,5 @@ func TestP2POneToManyPeerWithCreateUpdateLinkingSyncedDocToUnsyncedDoc(t *testin }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/one_to_many/replicator/with_create_test.go b/tests/integration/net/state/one_to_many/replicator/with_create_test.go index 0841803868..10e80a9a02 100644 --- a/tests/integration/net/state/one_to_many/replicator/with_create_test.go +++ b/tests/integration/net/state/one_to_many/replicator/with_create_test.go @@ -80,5 +80,5 @@ func TestP2POneToManyReplicator(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go 
b/tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go index dd81ce1700..f463cc00d3 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_get_remove_test.go @@ -47,7 +47,7 @@ func TestP2PSubscribeAddRemoveGetSingle(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddRemoveGetMultiple(t *testing.T) { @@ -85,5 +85,5 @@ func TestP2PSubscribeAddRemoveGetMultiple(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Giraffes"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go index d0aaeec78b..450902074a 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_get_test.go @@ -43,7 +43,7 @@ func TestP2PSubscribeAddGetSingle(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddGetMultiple(t *testing.T) { @@ -79,5 +79,5 @@ func TestP2PSubscribeAddGetMultiple(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Giraffes", "Bears"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go index 577ffe7142..d3795ee0e1 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go @@ -72,7 +72,7 @@ func TestP2PSubscribeAddAndRemoveSingle(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddAndRemoveMultiple(t *testing.T) { @@ -144,7 +144,7 @@ func TestP2PSubscribeAddAndRemoveMultiple(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Giraffes"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddSingleAndRemoveErroneous(t *testing.T) { @@ -169,7 +169,7 @@ func TestP2PSubscribeAddSingleAndRemoveErroneous(t *testing.T) { }, testUtils.UnsubscribeToCollection{ NodeID: 1, - CollectionIDs: []int{0, testUtils.NonExistantCollectionID}, + CollectionIDs: []int{0, testUtils.NonExistentCollectionID}, ExpectedError: "datastore: key not found", }, testUtils.CreateDoc{ @@ -196,7 +196,7 @@ func TestP2PSubscribeAddSingleAndRemoveErroneous(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddSingleAndRemoveNone(t *testing.T) { @@ -246,5 +246,5 @@ func TestP2PSubscribeAddSingleAndRemoveNone(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_test.go index cd8d140191..8cd294e98a 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_test.go @@ -87,7 +87,7 @@ func TestP2PSubscribeAddSingle(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } 
func TestP2PSubscribeAddMultiple(t *testing.T) { @@ -177,7 +177,7 @@ func TestP2PSubscribeAddMultiple(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Giraffes", "Bears"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddSingleErroneousCollectionID(t *testing.T) { @@ -198,7 +198,7 @@ func TestP2PSubscribeAddSingleErroneousCollectionID(t *testing.T) { }, testUtils.SubscribeToCollection{ NodeID: 1, - CollectionIDs: []int{testUtils.NonExistantCollectionID}, + CollectionIDs: []int{testUtils.NonExistentCollectionID}, ExpectedError: "datastore: key not found", }, testUtils.CreateDoc{ @@ -221,7 +221,7 @@ func TestP2PSubscribeAddSingleErroneousCollectionID(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddValidAndErroneousCollectionID(t *testing.T) { @@ -242,7 +242,7 @@ func TestP2PSubscribeAddValidAndErroneousCollectionID(t *testing.T) { }, testUtils.SubscribeToCollection{ NodeID: 1, - CollectionIDs: []int{0, testUtils.NonExistantCollectionID}, + CollectionIDs: []int{0, testUtils.NonExistentCollectionID}, ExpectedError: "datastore: key not found", }, testUtils.CreateDoc{ @@ -266,7 +266,7 @@ func TestP2PSubscribeAddValidAndErroneousCollectionID(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddValidThenErroneousCollectionID(t *testing.T) { @@ -291,7 +291,7 @@ func TestP2PSubscribeAddValidThenErroneousCollectionID(t *testing.T) { }, testUtils.SubscribeToCollection{ NodeID: 1, - CollectionIDs: []int{testUtils.NonExistantCollectionID}, + CollectionIDs: []int{testUtils.NonExistentCollectionID}, ExpectedError: "datastore: key not found", }, testUtils.CreateDoc{ @@ -318,7 +318,7 @@ func TestP2PSubscribeAddValidThenErroneousCollectionID(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PSubscribeAddNone(t *testing.T) { @@ -360,5 +360,5 @@ func TestP2PSubscribeAddNone(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_get_test.go b/tests/integration/net/state/simple/peer/subscribe/with_get_test.go index 6c7a38ef5c..775c7e1222 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_get_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_get_test.go @@ -36,5 +36,5 @@ func TestP2PSubscribeGetAll(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/with_create_add_field_test.go b/tests/integration/net/state/simple/peer/with_create_add_field_test.go index 7ad2b90f26..034340b92e 100644 --- a/tests/integration/net/state/simple/peer/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_create_add_field_test.go @@ -88,7 +88,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PPeerCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t *testing.T) { @@ -145,7 +145,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func 
TestP2PPeerCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T) { @@ -201,5 +201,5 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/with_create_test.go b/tests/integration/net/state/simple/peer/with_create_test.go index f13ac1846d..8833167aa2 100644 --- a/tests/integration/net/state/simple/peer/with_create_test.go +++ b/tests/integration/net/state/simple/peer/with_create_test.go @@ -83,7 +83,7 @@ func TestP2PCreateDoesNotSync(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // TestP2PCreateWithP2PCollection ensures that created documents reach the node that subscribes @@ -184,5 +184,5 @@ func TestP2PCreateWithP2PCollection(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/with_delete_test.go b/tests/integration/net/state/simple/peer/with_delete_test.go index e20add2f62..c838e8ab02 100644 --- a/tests/integration/net/state/simple/peer/with_delete_test.go +++ b/tests/integration/net/state/simple/peer/with_delete_test.go @@ -75,7 +75,7 @@ func TestP2PWithMultipleDocumentsSingleDelete(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithMultipleDocumentsSingleDeleteWithShowDeleted(t *testing.T) { @@ -138,7 +138,7 @@ func TestP2PWithMultipleDocumentsSingleDeleteWithShowDeleted(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithMultipleDocumentsWithSingleUpdateBeforeConnectSingleDeleteWithShowDeleted(t *testing.T) { @@ -210,7 +210,7 @@ func TestP2PWithMultipleDocumentsWithSingleUpdateBeforeConnectSingleDeleteWithSh }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithMultipleDocumentsWithMultipleUpdatesBeforeConnectSingleDeleteWithShowDeleted(t *testing.T) { @@ -291,7 +291,7 @@ func TestP2PWithMultipleDocumentsWithMultipleUpdatesBeforeConnectSingleDeleteWit }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWithShowDeleted(t *testing.T) { @@ -406,5 +406,5 @@ func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWit }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/with_update_add_field_test.go b/tests/integration/net/state/simple/peer/with_update_add_field_test.go index 118cd04efb..89ab3a99b0 100644 --- a/tests/integration/net/state/simple/peer/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_update_add_field_test.go @@ -100,7 +100,7 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t *test }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { @@ -177,5 +177,5 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git 
a/tests/integration/net/state/simple/peer/with_update_restart_test.go b/tests/integration/net/state/simple/peer/with_update_restart_test.go index b65b8ce871..42fc00e4bd 100644 --- a/tests/integration/net/state/simple/peer/with_update_restart_test.go +++ b/tests/integration/net/state/simple/peer/with_update_restart_test.go @@ -66,5 +66,5 @@ func TestP2PWithSingleDocumentSingleUpdateFromChildAndRestart(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer/with_update_test.go b/tests/integration/net/state/simple/peer/with_update_test.go index 4fc1521eec..fe122239ce 100644 --- a/tests/integration/net/state/simple/peer/with_update_test.go +++ b/tests/integration/net/state/simple/peer/with_update_test.go @@ -67,7 +67,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromChild(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // The parent-child distinction in these tests is as much documentation and test @@ -119,7 +119,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromParent(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // TestP2PWithSingleDocumentUpdatePerNode tests document syncing between two nodes with a single update per node @@ -177,7 +177,7 @@ func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncToNonPeerNode(t *testing.T) { @@ -257,7 +257,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncToNonPeerNode(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncFromUnmappedNode(t *testing.T) { @@ -339,7 +339,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncFromUnmappedNode(t *testing }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // TestP2PWithMultipleDocumentUpdatesPerNode tests document syncing between two nodes with multiple updates per node. @@ -419,7 +419,7 @@ func TestP2PWithMultipleDocumentUpdatesPerNode(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // TestP2PWithSingleDocumentSingleUpdateFromChildWithP2PCollection tests document syncing between two nodes by @@ -485,7 +485,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromChildWithP2PCollection(t *testing. 
}, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // TestP2PWithMultipleDocumentUpdatesPerNodeWithP2PCollection tests document syncing between two nodes with multiple @@ -601,5 +601,5 @@ func TestP2PWithMultipleDocumentUpdatesPerNodeWithP2PCollection(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer_replicator/with_create_test.go b/tests/integration/net/state/simple/peer_replicator/with_create_test.go index 991f8b9f84..72aae77a8c 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_create_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_create_test.go @@ -102,5 +102,5 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer_replicator/with_delete_test.go b/tests/integration/net/state/simple/peer_replicator/with_delete_test.go index 82f989909f..ba72c30610 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_delete_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_delete_test.go @@ -71,5 +71,5 @@ func TestP2PPeerReplicatorWithDeleteShowDeleted(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go b/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go index 3988463dc0..731f86b661 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go @@ -74,5 +74,5 @@ func TestP2PPeerReplicatorWithUpdateAndRestart(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_test.go b/tests/integration/net/state/simple/peer_replicator/with_update_test.go index 7355c8f973..8e2a86998e 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_update_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_update_test.go @@ -69,5 +69,5 @@ func TestP2PPeerReplicatorWithUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go index e117da2bd3..3e36b5c847 100644 --- a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go @@ -67,7 +67,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t *testing.T) { @@ -118,7 +118,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T) { @@ -171,5 +171,5 @@ func 
TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion( }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_create_restart_test.go b/tests/integration/net/state/simple/replicator/with_create_restart_test.go index 7e01c49d7b..7dc5746724 100644 --- a/tests/integration/net/state/simple/replicator/with_create_restart_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_restart_test.go @@ -60,5 +60,5 @@ func TestP2POneToOneReplicatorWithRestart(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go index c927d28c47..e1e75a25c0 100644 --- a/tests/integration/net/state/simple/replicator/with_create_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_test.go @@ -60,7 +60,7 @@ func TestP2POneToOneReplicator(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorDoesNotSyncExisting(t *testing.T) { @@ -105,7 +105,7 @@ func TestP2POneToOneReplicatorDoesNotSyncExisting(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorDoesNotSyncFromTargetToSource(t *testing.T) { @@ -147,7 +147,7 @@ func TestP2POneToOneReplicatorDoesNotSyncFromTargetToSource(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToManyReplicator(t *testing.T) { @@ -196,7 +196,7 @@ func TestP2POneToManyReplicator(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneOfManyReplicator(t *testing.T) { @@ -266,7 +266,7 @@ func TestP2POneToOneOfManyReplicator(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorManyDocs(t *testing.T) { @@ -321,7 +321,7 @@ func TestP2POneToOneReplicatorManyDocs(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToManyReplicatorManyDocs(t *testing.T) { @@ -381,7 +381,7 @@ func TestP2POneToManyReplicatorManyDocs(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) { @@ -451,7 +451,7 @@ func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) { @@ -511,5 +511,5 @@ func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_create_update_test.go b/tests/integration/net/state/simple/replicator/with_create_update_test.go index cf2231c7e0..dd3612055d 100644 --- a/tests/integration/net/state/simple/replicator/with_create_update_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_update_test.go @@ -68,7 +68,7 @@ func 
TestP2POneToOneReplicatorWithCreateWithUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorWithCreateWithUpdateOnRecipientNode(t *testing.T) { @@ -125,7 +125,7 @@ func TestP2POneToOneReplicatorWithCreateWithUpdateOnRecipientNode(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) { @@ -186,5 +186,5 @@ func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_delete_test.go b/tests/integration/net/state/simple/replicator/with_delete_test.go index c4cf12bbfa..48235e1b0a 100644 --- a/tests/integration/net/state/simple/replicator/with_delete_test.go +++ b/tests/integration/net/state/simple/replicator/with_delete_test.go @@ -69,7 +69,7 @@ func TestP2POneToOneReplicatorDeletesDocCreatedBeforeReplicatorConfig(t *testing }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorDeletesDocCreatedBeforeReplicatorConfigWithNodesInversed(t *testing.T) { @@ -123,5 +123,5 @@ func TestP2POneToOneReplicatorDeletesDocCreatedBeforeReplicatorConfigWithNodesIn }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go index 22c2da4759..22786fcaad 100644 --- a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go @@ -96,7 +96,7 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { @@ -169,5 +169,5 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/net/state/simple/replicator/with_update_test.go b/tests/integration/net/state/simple/replicator/with_update_test.go index afea25c232..370160c3b8 100644 --- a/tests/integration/net/state/simple/replicator/with_update_test.go +++ b/tests/integration/net/state/simple/replicator/with_update_test.go @@ -67,7 +67,7 @@ func TestP2POneToOneReplicatorUpdatesDocCreatedBeforeReplicatorConfig(t *testing }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestP2POneToOneReplicatorUpdatesDocCreatedBeforeReplicatorConfigWithNodesInversed(t *testing.T) { @@ -119,5 +119,5 @@ func TestP2POneToOneReplicatorUpdatesDocCreatedBeforeReplicatorConfigWithNodesIn }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 8164facb03..24d20d8c31 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -11,16 +11,14 @@ package tests import ( - "context" "fmt" - "testing" "time" - "github.com/sourcenetwork/defradb/client" 
"github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/net" + pb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" - "github.com/sourcenetwork/defradb/node" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" @@ -35,18 +33,18 @@ import ( type ConnectPeers struct { // SourceNodeID is the node ID (index) of the first node to connect. // - // Is completely interchangable with TargetNodeID and which way round + // Is completely interchangeable with TargetNodeID and which way round // these properties are specified is purely cosmetic. SourceNodeID int // TargetNodeID is the node ID (index) of the second node to connect. // - // Is completely interchangable with SourceNodeID and which way round + // Is completely interchangeable with SourceNodeID and which way round // these properties are specified is purely cosmetic. TargetNodeID int } -// ConfigureReplicator confugures a directional replicator relationship between +// ConfigureReplicator configures a directional replicator relationship between // two nodes. // // All document changes made in the source node will be synced to the target node. @@ -61,9 +59,9 @@ type ConfigureReplicator struct { TargetNodeID int } -// NonExistantCollectionID can be used to represent a non-existant collection ID, it will be substituted -// for a non-existant collection ID when used in actions that support this. -const NonExistantCollectionID int = -1 +// NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted +// for a non-existent collection ID when used in actions that support this. +const NonExistentCollectionID int = -1 // SubscribeToCollection sets up a subscription on the given node to the given collection. // @@ -78,7 +76,7 @@ type SubscribeToCollection struct { // CollectionIDs are the collection IDs (indexes) of the collections to subscribe to. // - // A [NonExistantCollectionID] may be provided to test non-existant collection IDs. + // A [NonExistentCollectionID] may be provided to test non-existent collection IDs. CollectionIDs []int // Any error expected from the action. Optional. @@ -96,7 +94,7 @@ type UnsubscribeToCollection struct { // CollectionIDs are the collection IDs (indexes) of the collections to unsubscribe from. // - // A [NonExistantCollectionID] may be provided to test non-existant collection IDs. + // A [NonExistentCollectionID] may be provided to test non-existent collection IDs. CollectionIDs []int // Any error expected from the action. Optional. @@ -125,57 +123,52 @@ type WaitForSync struct{} // AnyOf may be used as `Results` field where the value may // be one of several values, yet the value of that field must be the same -// across all nodes due to strong eventual consistancy. +// across all nodes due to strong eventual consistency. type AnyOf []any // connectPeers connects two existing, started, nodes as peers. It returns a channel -// that will recieve an empty struct upon sync completion of all expected peer-sync events. +// that will receive an empty struct upon sync completion of all expected peer-sync events. // // Any errors generated whilst configuring the peers or waiting on sync will result in a test failure. 
func connectPeers( - ctx context.Context, - t *testing.T, - testCase TestCase, + s *state, cfg ConnectPeers, - nodes []*node.Node, - addresses []string, -) chan struct{} { +) { // If we have some database actions prior to connecting the peers, we want to ensure that they had time to // complete before we connect. Otherwise we might wrongly catch them in our wait function. time.Sleep(100 * time.Millisecond) - sourceNode := nodes[cfg.SourceNodeID] - targetNode := nodes[cfg.TargetNodeID] - targetAddress := addresses[cfg.TargetNodeID] + sourceNode := s.nodes[cfg.SourceNodeID] + targetNode := s.nodes[cfg.TargetNodeID] + targetAddress := s.nodeAddresses[cfg.TargetNodeID] - log.Info(ctx, "Parsing bootstrap peers", logging.NewKV("Peers", targetAddress)) + log.Info(s.ctx, "Parsing bootstrap peers", logging.NewKV("Peers", targetAddress)) addrs, err := netutils.ParsePeers([]string{targetAddress}) if err != nil { - t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err) + s.t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err) } - log.Info(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) + log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) sourceNode.Boostrap(addrs) - // Boostrap triggers a bunch of async stuff for which we have no good way of waiting on. It must be + // Bootstrap triggers a bunch of async stuff for which we have no good way of waiting on. It must be // allowed to complete before documentation begins or it will not even try and sync it. So for now, we // sleep a little. time.Sleep(100 * time.Millisecond) - return setupPeerWaitSync(ctx, t, testCase, cfg, sourceNode, targetNode) + setupPeerWaitSync(s, 0, cfg, sourceNode, targetNode) } func setupPeerWaitSync( - ctx context.Context, - t *testing.T, - testCase TestCase, + s *state, + startIndex int, cfg ConnectPeers, - sourceNode *node.Node, - targetNode *node.Node, -) chan struct{} { + sourceNode *net.Node, + targetNode *net.Node, +) { nodeCollections := map[int][]int{} sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} waitIndex := 0 - for _, a := range testCase.Actions { - switch action := a.(type) { + for i := startIndex; i < len(s.testCase.Actions); i++ { + switch action := s.testCase.Actions[i].(type) { case SubscribeToCollection: if action.ExpectedError != "" { // If the subscription action is expected to error, then we should do nothing here. @@ -258,11 +251,11 @@ func setupPeerWaitSync( for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) - require.NoError(t, err) + require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) - require.NoError(t, err) + require.NoError(s.t, err) } nodeSynced <- struct{}{} } @@ -270,7 +263,7 @@ func setupPeerWaitSync( // Ensure that the wait routine is ready to receive events before we continue. <-ready - return nodeSynced + s.syncChans = append(s.syncChans, nodeSynced) } // collectionSubscribedTo returns true if the collection on the given node @@ -289,52 +282,52 @@ func collectionSubscribedTo( return false } -// configureReplicator configures a replicator relationship between two existing, staarted, nodes. 
-// It returns a channel that will recieve an empty struct upon sync completion of all expected +// configureReplicator configures a replicator relationship between two existing, started, nodes. +// It returns a channel that will receive an empty struct upon sync completion of all expected // replicator-sync events. // // Any errors generated whilst configuring the peers or waiting on sync will result in a test failure. func configureReplicator( - ctx context.Context, - t *testing.T, - testCase TestCase, + s *state, cfg ConfigureReplicator, - nodes []*node.Node, - addresses []string, -) chan struct{} { +) { // If we have some database actions prior to configuring the replicator, we want to ensure that they had time to // complete before the configuration. Otherwise we might wrongly catch them in our wait function. time.Sleep(100 * time.Millisecond) - sourceNode := nodes[cfg.SourceNodeID] - targetNode := nodes[cfg.TargetNodeID] - targetAddress := addresses[cfg.TargetNodeID] + sourceNode := s.nodes[cfg.SourceNodeID] + targetNode := s.nodes[cfg.TargetNodeID] + targetAddress := s.nodeAddresses[cfg.TargetNodeID] addr, err := ma.NewMultiaddr(targetAddress) - require.NoError(t, err) - - _, err = sourceNode.Peer.SetReplicator(ctx, addr) - require.NoError(t, err) - return setupRepicatorWaitSync(ctx, t, testCase, cfg, sourceNode, targetNode) + require.NoError(s.t, err) + + _, err = sourceNode.Peer.SetReplicator( + s.ctx, + &pb.SetReplicatorRequest{ + Addr: addr.Bytes(), + }, + ) + require.NoError(s.t, err) + setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode) } -func setupRepicatorWaitSync( - ctx context.Context, - t *testing.T, - testCase TestCase, +func setupReplicatorWaitSync( + s *state, + startIndex int, cfg ConfigureReplicator, - sourceNode *node.Node, - targetNode *node.Node, -) chan struct{} { + sourceNode *net.Node, + targetNode *net.Node, +) { sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} docIDsSyncedToSource := map[int]struct{}{} waitIndex := 0 - currentdocID := 0 - for _, a := range testCase.Actions { - switch action := a.(type) { + currentDocID := 0 + for i := startIndex; i < len(s.testCase.Actions); i++ { + switch action := s.testCase.Actions[i].(type) { case CreateDoc: if !action.NodeID.HasValue() || action.NodeID.Value() == cfg.SourceNodeID { - docIDsSyncedToSource[currentdocID] = struct{}{} + docIDsSyncedToSource[currentDocID] = struct{}{} } // A document created on the source or one that is created on all nodes will be sent to the target even @@ -343,7 +336,7 @@ func setupRepicatorWaitSync( sourceToTargetEvents[waitIndex] += 1 } - currentdocID++ + currentDocID++ case DeleteDoc: if _, shouldSyncFromTarget := docIDsSyncedToSource[action.DocID]; shouldSyncFromTarget && @@ -379,11 +372,11 @@ func setupRepicatorWaitSync( for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) - require.NoError(t, err) + require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) - require.NoError(t, err) + require.NoError(s.t, err) } nodeSynced <- struct{}{} } @@ -391,36 +384,37 @@ func setupRepicatorWaitSync( // Ensure that the wait routine is ready to receive events before we continue. <-ready - return nodeSynced + s.syncChans = append(s.syncChans, nodeSynced) } // subscribeToCollection sets up a collection subscription on the given node/collection. 
// // Any errors generated during this process will result in a test failure. func subscribeToCollection( - ctx context.Context, - t *testing.T, - testCase TestCase, + s *state, action SubscribeToCollection, - nodes []*node.Node, - collections [][]client.Collection, ) { - n := nodes[action.NodeID] + n := s.nodes[action.NodeID] schemaIDs := []string{} for _, collectionIndex := range action.CollectionIDs { - if collectionIndex == NonExistantCollectionID { - schemaIDs = append(schemaIDs, "NonExistantCollectionID") + if collectionIndex == NonExistentCollectionID { + schemaIDs = append(schemaIDs, "NonExistentCollectionID") continue } - col := collections[action.NodeID][collectionIndex] + col := s.collections[action.NodeID][collectionIndex] schemaIDs = append(schemaIDs, col.SchemaID()) } - err := n.Peer.AddP2PCollections(schemaIDs) - expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + _, err := n.Peer.AddP2PCollections( + s.ctx, + &pb.AddP2PCollectionsRequest{ + Collections: schemaIDs, + }, + ) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) // The `n.Peer.AddP2PCollections(colIDs)` call above is calling some asynchronous functions // for the pubsub subscription and those functions can take a bit of time to complete, @@ -432,29 +426,30 @@ func subscribeToCollection( // // Any errors generated during this process will result in a test failure. func unsubscribeToCollection( - ctx context.Context, - t *testing.T, - testCase TestCase, + s *state, action UnsubscribeToCollection, - nodes []*node.Node, - collections [][]client.Collection, ) { - n := nodes[action.NodeID] + n := s.nodes[action.NodeID] schemaIDs := []string{} for _, collectionIndex := range action.CollectionIDs { - if collectionIndex == NonExistantCollectionID { - schemaIDs = append(schemaIDs, "NonExistantCollectionID") + if collectionIndex == NonExistentCollectionID { + schemaIDs = append(schemaIDs, "NonExistentCollectionID") continue } - col := collections[action.NodeID][collectionIndex] + col := s.collections[action.NodeID][collectionIndex] schemaIDs = append(schemaIDs, col.SchemaID()) } - err := n.Peer.RemoveP2PCollections(schemaIDs) - expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + _, err := n.Peer.RemoveP2PCollections( + s.ctx, + &pb.RemoveP2PCollectionsRequest{ + Collections: schemaIDs, + }, + ) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) // The `n.Peer.RemoveP2PCollections(colIDs)` call above is calling some asynchronous functions // for the pubsub subscription and those functions can take a bit of time to complete, @@ -467,29 +462,29 @@ func unsubscribeToCollection( // // Any errors generated during this process will result in a test failure. 
func getAllP2PCollections( - ctx context.Context, - t *testing.T, + s *state, action GetAllP2PCollections, - nodes []*node.Node, - collections [][]client.Collection, ) { - expectedCollections := []client.P2PCollection{} + expectedCollections := []*pb.GetAllP2PCollectionsReply_Collection{} for _, collectionIndex := range action.ExpectedCollectionIDs { - col := collections[action.NodeID][collectionIndex] + col := s.collections[action.NodeID][collectionIndex] expectedCollections = append( expectedCollections, - client.P2PCollection{ - ID: col.SchemaID(), + &pb.GetAllP2PCollectionsReply_Collection{ + Id: col.SchemaID(), Name: col.Name(), }, ) } - n := nodes[action.NodeID] - cols, err := n.Peer.GetAllP2PCollections() - require.NoError(t, err) + n := s.nodes[action.NodeID] + cols, err := n.Peer.GetAllP2PCollections( + s.ctx, + &pb.GetAllP2PCollectionsRequest{}, + ) + require.NoError(s.t, err) - assert.Equal(t, expectedCollections, cols) + assert.Equal(s.t, expectedCollections, cols.Collections) } // waitForSync waits for all given wait channels to receive an item signaling completion. @@ -497,19 +492,17 @@ func getAllP2PCollections( // Will fail the test if an event is not received within the expected time interval to prevent tests // from running forever. func waitForSync( - t *testing.T, - testCase TestCase, + s *state, action WaitForSync, - waitChans []chan struct{}, ) { - for _, resultsChan := range waitChans { + for _, resultsChan := range s.syncChans { select { case <-resultsChan: continue // a safety in case the stream hangs - we don't want the tests to run forever. case <-time.After(subscriptionTimeout * 10): - assert.Fail(t, "timeout occured while waiting for data stream", testCase.Description) + assert.Fail(s.t, "timeout occurred while waiting for data stream", s.testCase.Description) } } } diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index d7411bb03a..ffd558f2ee 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -49,7 +49,7 @@ func TestQueryCommits(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsMultipleDocs(t *testing.T) { @@ -101,7 +101,7 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { @@ -141,7 +141,7 @@ func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithFieldNameField(t *testing.T) { @@ -178,7 +178,7 @@ func TestQueryCommitsWithFieldNameField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithFieldNameFieldAndUpdate(t *testing.T) { @@ -226,7 +226,7 @@ func TestQueryCommitsWithFieldNameFieldAndUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithFieldIDField(t *testing.T) { @@ -263,7 +263,7 @@ func TestQueryCommitsWithFieldIDField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithFieldIDFieldWithUpdate(t *testing.T) { @@ -311,5 +311,5 @@ func TestQueryCommitsWithFieldIDFieldWithUpdate(t *testing.T) { 
}, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 46a711409e..6d8c30d73e 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -52,7 +52,7 @@ func TestQueryCommitsWithCid(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { @@ -85,7 +85,7 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithInvalidCid(t *testing.T) { @@ -113,7 +113,7 @@ func TestQueryCommitsWithInvalidCid(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithInvalidShortCid(t *testing.T) { @@ -141,7 +141,7 @@ func TestQueryCommitsWithInvalidShortCid(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithUnknownCid(t *testing.T) { @@ -169,5 +169,5 @@ func TestQueryCommitsWithUnknownCid(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_collectionid_group_order_test.go b/tests/integration/query/commits/with_collectionid_group_order_test.go index d911e4e316..f39fcbd87c 100644 --- a/tests/integration/query/commits/with_collectionid_group_order_test.go +++ b/tests/integration/query/commits/with_collectionid_group_order_test.go @@ -53,7 +53,7 @@ func TestQueryCommitsWithCollectionIDGroupedAndOrderedDesc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Companies"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithCollectionIDGroupedAndOrderedAs(t *testing.T) { @@ -93,5 +93,5 @@ func TestQueryCommitsWithCollectionIDGroupedAndOrderedAs(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Companies"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_collectionid_prop_test.go b/tests/integration/query/commits/with_collectionid_prop_test.go index 64ad1b823a..6de4a71e2e 100644 --- a/tests/integration/query/commits/with_collectionid_prop_test.go +++ b/tests/integration/query/commits/with_collectionid_prop_test.go @@ -62,5 +62,5 @@ func TestQueryCommitsWithCollectionID(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Companies"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index 834f10d21c..12acde76e5 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -49,7 +49,7 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { @@ -98,7 +98,7 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { @@ -165,7 +165,7 @@ func TestQueryCommitsWithDepth2WithUpdate(t 
*testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { @@ -217,5 +217,5 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_cid_test.go b/tests/integration/query/commits/with_dockey_cid_test.go index 7900ae1624..c6c9c3e7e0 100644 --- a/tests/integration/query/commits/with_dockey_cid_test.go +++ b/tests/integration/query/commits/with_dockey_cid_test.go @@ -42,7 +42,7 @@ func TestQueryCommitsWithDockeyAndCidForDifferentDoc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndCidForDifferentDocWithUpdate(t *testing.T) { @@ -78,7 +78,7 @@ func TestQueryCommitsWithDockeyAndCidForDifferentDocWithUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndCid(t *testing.T) { @@ -118,5 +118,5 @@ func TestQueryCommitsWithDockeyAndCid(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_count_test.go b/tests/integration/query/commits/with_dockey_count_test.go index 5e60e18ce9..dc64c6847b 100644 --- a/tests/integration/query/commits/with_dockey_count_test.go +++ b/tests/integration/query/commits/with_dockey_count_test.go @@ -53,5 +53,5 @@ func TestQueryCommitsWithDockeyAndLinkCount(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_field_test.go b/tests/integration/query/commits/with_dockey_field_test.go index 1c3b2e65e5..77857a23fe 100644 --- a/tests/integration/query/commits/with_dockey_field_test.go +++ b/tests/integration/query/commits/with_dockey_field_test.go @@ -39,7 +39,7 @@ func TestQueryCommitsWithDockeyAndUnknownField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndUnknownFieldId(t *testing.T) { @@ -65,7 +65,7 @@ func TestQueryCommitsWithDockeyAndUnknownFieldId(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -93,7 +93,7 @@ func TestQueryCommitsWithDockeyAndField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -125,7 +125,7 @@ func TestQueryCommitsWithDockeyAndFieldId(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. 
This is not @@ -157,5 +157,5 @@ func TestQueryCommitsWithDockeyAndCompositeFieldId(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_group_order_test.go b/tests/integration/query/commits/with_dockey_group_order_test.go index 4fd9c48d47..d29a3683ea 100644 --- a/tests/integration/query/commits/with_dockey_group_order_test.go +++ b/tests/integration/query/commits/with_dockey_group_order_test.go @@ -53,5 +53,5 @@ func TestQueryCommitsOrderedAndGroupedByDocKey(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_limit_offset_test.go b/tests/integration/query/commits/with_dockey_limit_offset_test.go index 682024f64b..3ec10284e3 100644 --- a/tests/integration/query/commits/with_dockey_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_limit_offset_test.go @@ -67,5 +67,5 @@ func TestQueryCommitsWithDockeyAndLimitAndOffset(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_limit_test.go b/tests/integration/query/commits/with_dockey_limit_test.go index 2abf482120..4b87bfa307 100644 --- a/tests/integration/query/commits/with_dockey_limit_test.go +++ b/tests/integration/query/commits/with_dockey_limit_test.go @@ -60,5 +60,5 @@ func TestQueryCommitsWithDockeyAndLimit(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go index 842a454354..1b1a8fe885 100644 --- a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go @@ -70,5 +70,5 @@ func TestQueryCommitsWithDockeyAndOrderAndLimitAndOffset(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_order_test.go b/tests/integration/query/commits/with_dockey_order_test.go index dd47581b0f..c2d1aac620 100644 --- a/tests/integration/query/commits/with_dockey_order_test.go +++ b/tests/integration/query/commits/with_dockey_order_test.go @@ -68,7 +68,7 @@ func TestQueryCommitsWithDockeyAndOrderHeightDesc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndOrderHeightAsc(t *testing.T) { @@ -123,7 +123,7 @@ func TestQueryCommitsWithDockeyAndOrderHeightAsc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndOrderCidDesc(t *testing.T) { @@ -178,7 +178,7 @@ func TestQueryCommitsWithDockeyAndOrderCidDesc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndOrderCidAsc(t *testing.T) { @@ -233,7 +233,7 @@ func TestQueryCommitsWithDockeyAndOrderCidAsc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndOrderAndMultiUpdatesCidAsc(t *testing.T) { @@ -318,5 +318,5 @@ func 
TestQueryCommitsWithDockeyAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_prop_test.go b/tests/integration/query/commits/with_dockey_prop_test.go index 060d8e51cf..daf21ba1c7 100644 --- a/tests/integration/query/commits/with_dockey_prop_test.go +++ b/tests/integration/query/commits/with_dockey_prop_test.go @@ -49,5 +49,5 @@ func TestQueryCommitsWithDockeyProperty(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_test.go b/tests/integration/query/commits/with_dockey_test.go index 8fe97edb03..8e21007f3e 100644 --- a/tests/integration/query/commits/with_dockey_test.go +++ b/tests/integration/query/commits/with_dockey_test.go @@ -39,7 +39,7 @@ func TestQueryCommitsWithUnknownDockey(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockey(t *testing.T) { @@ -75,7 +75,7 @@ func TestQueryCommitsWithDockey(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndLinks(t *testing.T) { @@ -127,7 +127,7 @@ func TestQueryCommitsWithDockeyAndLinks(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithDockeyAndUpdate(t *testing.T) { @@ -182,7 +182,7 @@ func TestQueryCommitsWithDockeyAndUpdate(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -266,5 +266,5 @@ func TestQueryCommitsWithDockeyAndUpdateAndLinks(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_dockey_typename_test.go b/tests/integration/query/commits/with_dockey_typename_test.go index 137167389e..106d0ff326 100644 --- a/tests/integration/query/commits/with_dockey_typename_test.go +++ b/tests/integration/query/commits/with_dockey_typename_test.go @@ -53,5 +53,5 @@ func TestQueryCommitsWithDockeyWithTypeName(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index 91cd3d0215..e355db1710 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -41,7 +41,7 @@ func TestQueryCommitsWithField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -73,7 +73,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -105,7 +105,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is for documentation reasons only. This is not @@ -139,5 +139,5 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing. 
}, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index f382d5a3d9..86822aac06 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -53,7 +53,7 @@ func TestQueryCommitsWithGroupBy(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { @@ -115,7 +115,7 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This is an odd test, but we need to make sure it works @@ -170,7 +170,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByDocKey(t *testing.T) { @@ -224,7 +224,7 @@ func TestQueryCommitsWithGroupByDocKey(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByFieldName(t *testing.T) { @@ -267,7 +267,7 @@ func TestQueryCommitsWithGroupByFieldName(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByFieldNameWithChild(t *testing.T) { @@ -334,7 +334,7 @@ func TestQueryCommitsWithGroupByFieldNameWithChild(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByFieldID(t *testing.T) { @@ -377,7 +377,7 @@ func TestQueryCommitsWithGroupByFieldID(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryCommitsWithGroupByFieldIDWithChild(t *testing.T) { @@ -444,5 +444,5 @@ func TestQueryCommitsWithGroupByFieldIDWithChild(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/latest_commits/with_collectionid_prop_test.go b/tests/integration/query/latest_commits/with_collectionid_prop_test.go index a514e506af..afdd6ae7ee 100644 --- a/tests/integration/query/latest_commits/with_collectionid_prop_test.go +++ b/tests/integration/query/latest_commits/with_collectionid_prop_test.go @@ -62,5 +62,5 @@ func TestQueryLastCommitsWithCollectionIdProperty(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Companies"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/latest_commits/with_dockey_prop_test.go b/tests/integration/query/latest_commits/with_dockey_prop_test.go index f323bebb83..b7ffd80d65 100644 --- a/tests/integration/query/latest_commits/with_dockey_prop_test.go +++ b/tests/integration/query/latest_commits/with_dockey_prop_test.go @@ -43,5 +43,5 @@ func TestQueryLastCommitsWithDockeyProperty(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many/with_average_filter_test.go b/tests/integration/query/one_to_many/with_average_filter_test.go index a6c52c9e71..1404be5962 100644 --- a/tests/integration/query/one_to_many/with_average_filter_test.go +++ 
b/tests/integration/query/one_to_many/with_average_filter_test.go @@ -59,6 +59,7 @@ func TestQueryOneToManyWithAverageAndChildNeNilFilterSharesJoinField(t *testing. }, }, "selectNode": dataMap{ + "_keys": nil, "filter": nil, "typeIndexJoin": dataMap{ "joinType": "typeJoinMany", @@ -80,6 +81,7 @@ func TestQueryOneToManyWithAverageAndChildNeNilFilterSharesJoinField(t *testing. "subType": dataMap{ "selectTopNode": dataMap{ "selectNode": dataMap{ + "_keys": nil, "filter": nil, "scanNode": dataMap{ "filter": dataMap{ diff --git a/tests/integration/query/one_to_many/with_count_filter_test.go b/tests/integration/query/one_to_many/with_count_filter_test.go index 008b262981..9deecae01f 100644 --- a/tests/integration/query/one_to_many/with_count_filter_test.go +++ b/tests/integration/query/one_to_many/with_count_filter_test.go @@ -184,6 +184,7 @@ func TestQueryOneToManyWithCountWithFilterAndChildFilterSharesJoinField(t *testi }, }, "selectNode": dataMap{ + "_keys": nil, "filter": nil, "typeIndexJoin": dataMap{ "joinType": "typeJoinMany", @@ -205,6 +206,7 @@ func TestQueryOneToManyWithCountWithFilterAndChildFilterSharesJoinField(t *testi "subType": dataMap{ "selectTopNode": dataMap{ "selectNode": dataMap{ + "_keys": nil, "filter": nil, "scanNode": dataMap{ "filter": dataMap{ @@ -256,6 +258,7 @@ func TestQueryOneToManyWithCountAndChildFilterDoesNotShareJoinField(t *testing.T "selectTopNode": dataMap{ "countNode": dataMap{ "selectNode": dataMap{ + "_keys": nil, "filter": nil, "parallelNode": []dataMap{ { @@ -278,6 +281,7 @@ func TestQueryOneToManyWithCountAndChildFilterDoesNotShareJoinField(t *testing.T "subType": dataMap{ "selectTopNode": dataMap{ "selectNode": dataMap{ + "_keys": nil, "filter": nil, "scanNode": dataMap{ "collectionID": "1", @@ -320,6 +324,7 @@ func TestQueryOneToManyWithCountAndChildFilterDoesNotShareJoinField(t *testing.T "subType": dataMap{ "selectTopNode": dataMap{ "selectNode": dataMap{ + "_keys": nil, "filter": nil, "scanNode": dataMap{ "collectionID": "1", diff --git a/tests/integration/query/one_to_many/with_filter_related_id_test.go b/tests/integration/query/one_to_many/with_filter_related_id_test.go new file mode 100644 index 0000000000..87c895e5c1 --- /dev/null +++ b/tests/integration/query/one_to_many/with_filter_related_id_test.go @@ -0,0 +1,404 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryFromManySideWithEqFilterOnRelatedType(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query from many side with _eq filter on related field type.", + + Request: `query { + Book(filter: {author: {_key: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}}) { + name + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + {"name": "The Client"}, + {"name": "Painted House"}, + {"name": "A Time for Mercy"}, + }, + } + + executeTestCase(t, test) +} + +func TestQueryFromManySideWithFilterOnRelatedObjectID(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query from many side with filter on related field.", + + Request: `query { + Book(filter: {author_id: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}) { + name + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + {"name": "The Client"}, + {"name": "Painted House"}, + {"name": "A Time for Mercy"}, + }, + } + + 
executeTestCase(t, test) +} + +func TestQueryFromManySideWithSameFiltersInDifferentWayOnRelatedType(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query from many side with same filters in different way on related type.", + + Request: `query { + Book( + filter: { + author: {_key: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"}}, + author_id: {_eq: "bae-41598f0c-19bc-5da6-813b-e80f14a10df3"} + } + ) { + name + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + {"name": "The Client"}, + {"name": "Painted House"}, + {"name": "A Time for Mercy"}, + }, + } + + executeTestCase(t, test) +} + +func TestQueryFromSingleSideWithEqFilterOnRelatedType(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query from single side with _eq filter on related field type.", + + Request: `query { + Author(filter: {published: {_key: {_eq: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d"}}}) { + name + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "name": "John Grisham", + }, + }, + } + + 
executeTestCase(t, test) +} + +func TestQueryFromSingleSideWithFilterOnRelatedObjectID_Error(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query from single side with filter on related field.", + + Request: `query { + Author(filter: {published_id: {_eq: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d"}}) { + name + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "Argument \"filter\" has invalid value {published_id: {_eq: \"bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d\"}}.\nIn field \"published_id\": Unknown field.", + } + + executeTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many/with_group_related_id_alias_test.go b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go new file mode 100644 index 0000000000..8e2223e324 --- /dev/null +++ b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go @@ -0,0 +1,774 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
+func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with groupBy on related field alias (from many side).", + + Request: `query { + Book(groupBy: [author]) { + _group { + name + rating + author { + name + age + } + } + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
+func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRelatedSelection(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with groupBy on related field alias (from many side).", + + Request: `query { + Book(groupBy: [author]) { + author { + _key + name + } + _group { + name + rating + author { + name + age + } + } + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author": map[string]any{ + "name": "Voltaire", + "_key": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + }, + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author": map[string]any{ + "name": "John Grisham", + "_key": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + }, + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author": map[string]any{ + "name": "Simon Pelloutier", + "_key": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + }, + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySideUsingAlias(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with groupBy on related field alias, with id selection & related selection (from many side).", + + Request: `query { + Book(groupBy: [author]) { + author_id + _group { + name + rating + author { + name + age + } + } + } + }`, + + Docs: map[int][]string{ + //books + 0: { // 
bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySideUsingAliasAndRelatedSelection(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with groupBy on related field alias, with id selection & related selection (from many side).", + + Request: `query { + Book(groupBy: [author]) { + author_id + author { + _key + name + } + _group { + name + rating + author { + name + age + } + } + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": 
"bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "author": map[string]any{ + "name": "Voltaire", + "_key": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + }, + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "author": map[string]any{ + "name": "John Grisham", + "_key": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + }, + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "author": map[string]any{ + "name": "Simon Pelloutier", + "_key": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + }, + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
+func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSideUsingAlias(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id field alias (from single side).", + Request: `query { + Author(groupBy: [published]) { + _group { + name + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "invalid field value to groupBy. Field: published", + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromSingleSideUsingAlias(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id field alias, with id selection (from single side).", + Request: `query { + Author(groupBy: [published]) { + published_id + _group { + name + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "Cannot query field \"published_id\" on type \"Author\". 
", + } + + executeTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many/with_group_related_id_test.go b/tests/integration/query/one_to_many/with_group_related_id_test.go new file mode 100644 index 0000000000..535e8665cd --- /dev/null +++ b/tests/integration/query/one_to_many/with_group_related_id_test.go @@ -0,0 +1,457 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. +func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id (from many side).", + Request: `query { + Book(groupBy: [author_id]) { + _group { + name + rating + author { + name + age + } + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et 
particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySide(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id, with id selection (from many side).", + Request: `query { + Book(groupBy: [author_id]) { + author_id + _group { + name + rating + author { + name + age + } + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + Results: []map[string]any{ + { + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + "_group": []map[string]any{ + { + "name": "Candide", + "rating": 4.95, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + { + "name": "Zadig", + "rating": 4.91, + "author": map[string]any{ + "age": uint64(327), + "name": "Voltaire", + }, + }, + }, + }, + { + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "_group": []map[string]any{ + { + "name": "The Client", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "rating": 4.9, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + { + "name": "A Time for Mercy", + "rating": 4.5, + "author": map[string]any{ + "age": uint64(65), + "name": "John Grisham", + }, + }, + }, + }, + { + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + "_group": []map[string]any{ + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2.0, + "author": map[string]any{ + "age": uint64(327), + "name": "Simon Pelloutier", + }, + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + +// TODO: Don't return grouped field if not selected. [https://github.com/sourcenetwork/defradb/issues/1582]. 
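+// The two single-side tests below are expected to error: `published_id` is not a valid `AuthorFields` groupBy value, since the relation id field is only generated on the many (Book) side.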
+func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromSingleSide(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id (from single side).", + Request: `query { + Author(groupBy: [published_id]) { + _group { + name + published { + name + rating + } + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "Argument \"groupBy\" has invalid value [published_id].\nIn element #1: Expected type \"AuthorFields\", found published_id.", + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromSingleSide(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many query with groupBy on related id, with id selection (from single side).", + Request: `query { + Author(groupBy: [published_id]) { + published_id + _group { + name + published { + name + rating + } + } + } + }`, + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "Argument \"groupBy\" has invalid value [published_id].\nIn element #1: Expected type \"AuthorFields\", found published_id.", + } + + 
executeTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many/with_id_field_test.go b/tests/integration/query/one_to_many/with_id_field_test.go new file mode 100644 index 0000000000..455ae3ab78 --- /dev/null +++ b/tests/integration/query/one_to_many/with_id_field_test.go @@ -0,0 +1,88 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +// This documents unwanted behaviour, see https://github.com/sourcenetwork/defradb/issues/1520 +func TestQueryOneToManyWithIdFieldOnPrimary(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-many relation primary direction, id field with name clash on primary side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author_id: Int + author: Author + } + + type Author { + name: String + published: [Book] + } + `, + }, + testUtils.CreateDoc{ + // bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed + CollectionID: 1, + Doc: `{ + "name": "John Grisham" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Painted House", + "author_id": 123456 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "A Time for Mercy", + "author_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed" + }`, + }, + testUtils.Request{ + Request: `query { + Book { + name + author_id + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "A Time for Mercy", + "author_id": "bae-2edb7fdd-cad7-5ad4-9c7d-6920245a96ed", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + { + "name": "Painted House", + "author_id": uint64(123456), + "author": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many/with_related_id_test.go b/tests/integration/query/one_to_many/with_related_id_test.go new file mode 100644 index 0000000000..bcfef26cfe --- /dev/null +++ b/tests/integration/query/one_to_many/with_related_id_test.go @@ -0,0 +1,194 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_many + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToManyWithRelatedTypeIDFromManySide(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with related id (from many side).", + + Request: `query { + Book { + name + author_id + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + + //authors + 1: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + Results: []map[string]any{ + { + "name": "Candide", + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + }, + { + "name": "Zadig", + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3", + }, + { + "name": "The Client", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + }, + { + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c", + }, + { + "name": "Painted House", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + }, + { + "name": "A Time for Mercy", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + }, + }, + } + + executeTestCase(t, test) +} + +func TestQueryOneToManyWithRelatedTypeIDFromSingleSide(t *testing.T) { + test := testUtils.RequestTestCase{ + + Description: "One-to-many query with related id (from single side).", + + Request: `query { + Author { + name + author_id + } + }`, + + Docs: map[int][]string{ + //books + 0: { // bae-fd541c25-229e-5280-b44b-e5c2af3e374d + `{ + "name": "Painted House", + "rating": 4.9, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "A Time for Mercy", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "The Client", + "rating": 4.5, + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3" + }`, + `{ + "name": "Candide", + "rating": 4.95, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Zadig", + "rating": 4.91, + "author_id": "bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3" + }`, + `{ + "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", + "rating": 2, + "author_id": "bae-09d33399-197a-5b98-b135-4398f2b6de4c" + }`, + }, + + //authors + 1: { + // 
bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-7accaba8-ea9d-54b1-92f4-4a7ac5de88b3 + `{ + "name": "Voltaire", + "age": 327, + "verified": true + }`, + // bae-09d33399-197a-5b98-b135-4398f2b6de4c + `{ + "name": "Simon Pelloutier", + "age": 327, + "verified": true + }`, + }, + }, + + ExpectedError: "Cannot query field \"author_id\" on type \"Author\".", + } + + executeTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go b/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go new file mode 100644 index 0000000000..26a15729ea --- /dev/null +++ b/tests/integration/query/one_to_many_multiple/with_multiple_filter_test.go @@ -0,0 +1,93 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_many_multiple + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToManyMultipleWithMultipleManyFilters(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "One-to-many relation query from one side with multiple many fitlers", + Request: `query { + Author(filter: {name: {_eq: "John Grisham"}, books: {score: {_eq: 1}}, articles: {rating: {_eq: 3}}}) { + name + } + }`, + Docs: map[int][]string{ + //articles + 0: { + `{ + "name": "After Guantánamo, Another Injustice", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "rating": 3 + }`, + `{ + "name": "To my dear readers", + "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", + "rating": 2 + }`, + `{ + "name": "Twinklestar's Favourite Xmas Cookie", + "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", + "rating": 1 + }`, + }, + //books + 1: { + `{ + "name": "Painted House", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "score": 1 + }`, + `{ + "name": "A Time for Mercy", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "score": 2 + }`, + `{ + "name": "Sooley", + "author_id": "bae-41598f0c-19bc-5da6-813b-e80f14a10df3", + "score": 3 + }`, + `{ + "name": "Theif Lord", + "author_id": "bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04", + "score": 4 + }`, + }, + //authors + 2: { + // bae-41598f0c-19bc-5da6-813b-e80f14a10df3 + `{ + "name": "John Grisham", + "age": 65, + "verified": true + }`, + // bae-b769708d-f552-5c3d-a402-ccfd7ac7fb04 + `{ + "name": "Cornelia Funke", + "age": 62, + "verified": false + }`, + }, + }, + Results: []map[string]any{ + { + "name": "John Grisham", + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/query/one_to_many_to_one/joins_test.go b/tests/integration/query/one_to_many_to_one/joins_test.go index 230b85882e..e30b1b699a 100644 --- a/tests/integration/query/one_to_many_to_one/joins_test.go +++ b/tests/integration/query/one_to_many_to_one/joins_test.go @@ -233,5 +233,5 @@ func TestOneToManyToOneJoinsAreLinkedProperly(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/simple_test.go b/tests/integration/query/one_to_many_to_one/simple_test.go index ca1f6cc017..62a9561ae1 
100644 --- a/tests/integration/query/one_to_many_to_one/simple_test.go +++ b/tests/integration/query/one_to_many_to_one/simple_test.go @@ -140,5 +140,5 @@ func TestQueryOneToOneRelations(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/with_filter_test.go b/tests/integration/query/one_to_many_to_one/with_filter_test.go index d7e5d45c5d..7f15fe58a0 100644 --- a/tests/integration/query/one_to_many_to_one/with_filter_test.go +++ b/tests/integration/query/one_to_many_to_one/with_filter_test.go @@ -123,7 +123,7 @@ func TestQueryComplexWithDeepFilterOnRenderedChildren(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestOneToManyToOneWithSumOfDeepFilterSubTypeOfBothDescAndAsc(t *testing.T) { @@ -163,7 +163,7 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeOfBothDescAndAsc(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestOneToManyToOneWithSumOfDeepFilterSubTypeAndDeepOrderBySubtypeOppositeDirections(t *testing.T) { @@ -211,5 +211,76 @@ func TestOneToManyToOneWithSumOfDeepFilterSubTypeAndDeepOrderBySubtypeOppositeDi }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) +} + +func TestOneToManyToOneWithTwoLevelDeepFilter(t *testing.T) { + test := testUtils.TestCase{ + Description: "1-N-1 two level deep filter", + Actions: []any{ + gqlSchemaOneToManyToOne(), + createDocsWith6BooksAnd5Publishers(), + testUtils.Request{ + Request: `query { + Author (filter: {book: {publisher: {yearOpened: { _ge: 2020}}}}){ + name + book { + name + publisher { + yearOpened + } + } + } + }`, + Results: []map[string]any{ + { + "book": []map[string]any{ + { + "name": "The Associate", + "publisher": nil, + }, + { + "name": "Sooley", + "publisher": map[string]any{ + "yearOpened": uint64(1999), + }, + }, + { + "name": "Theif Lord", + "publisher": map[string]any{ + "yearOpened": uint64(2020), + }, + }, + { + "name": "Painted House", + "publisher": map[string]any{ + "yearOpened": uint64(1995), + }, + }, + { + "name": "A Time for Mercy", + "publisher": map[string]any{ + "yearOpened": uint64(2013), + }, + }, + }, + "name": "John Grisham", + }, + { + "book": []map[string]any{ + { + "name": "The Rooster Bar", + "publisher": map[string]any{ + "yearOpened": uint64(2022), + }, + }, + }, + "name": "Cornelia Funke", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/with_order_limit_test.go b/tests/integration/query/one_to_many_to_one/with_order_limit_test.go index d7f5bd43b1..fecfdc980d 100644 --- a/tests/integration/query/one_to_many_to_one/with_order_limit_test.go +++ b/tests/integration/query/one_to_many_to_one/with_order_limit_test.go @@ -71,5 +71,5 @@ func TestOneToManyToOneDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/with_order_test.go b/tests/integration/query/one_to_many_to_one/with_order_test.go index 651f0ba174..41bb88f544 100644 --- a/tests/integration/query/one_to_many_to_one/with_order_test.go +++ b/tests/integration/query/one_to_many_to_one/with_order_test.go @@ -84,7 +84,7 
@@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { @@ -155,5 +155,5 @@ func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go b/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go index 1fef64ef16..bcb344c1e9 100644 --- a/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go +++ b/tests/integration/query/one_to_many_to_one/with_sum_order_limit_test.go @@ -63,7 +63,7 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeDescDirec }, }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeAscDirections(t *testing.T) { @@ -116,7 +116,7 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeAscDirect }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestOneToManyToOneWithSumOfDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) { @@ -156,7 +156,7 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeOfBothDescAndAsc(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeOppositeDirections(t *testing.T) { @@ -208,5 +208,5 @@ func TestOneToManyToOneWithSumOfDeepOrderBySubTypeAndDeepOrderBySubtypeOppositeD }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_many_to_one/with_sum_test.go b/tests/integration/query/one_to_many_to_one/with_sum_test.go index a8beb29bba..0fadbfb138 100644 --- a/tests/integration/query/one_to_many_to_one/with_sum_test.go +++ b/tests/integration/query/one_to_many_to_one/with_sum_test.go @@ -112,5 +112,5 @@ func TestQueryWithSumOnInlineAndSumOnOneToManyField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book", "Publisher"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_one/with_group_related_id_alias_test.go b/tests/integration/query/one_to_one/with_group_related_id_alias_test.go new file mode 100644 index 0000000000..cbe75c9318 --- /dev/null +++ b/tests/integration/query/one_to_one/with_group_related_id_alias_test.go @@ -0,0 +1,191 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_one + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToOneWithGroupRelatedIDAlias(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation query with group by related id alias (primary side)", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author: Author @primary + } + + type Author { + name: String + published: Book + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-3d236f89-6a31-5add-a36a-27971a2eac76 + Doc: `{ + "name": "Painted House" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 + Doc: `{ + "name": "Go Guide for Rust developers" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 + Doc: `{ + "name": "John Grisham", + "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c + Doc: `{ + "name": "Andrew Lone", + "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" + }`, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author_id + author { + name + } + _group { + name + } + } + }`, + Results: []map[string]any{ + { + "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "author": map[string]any{ + "name": "John Grisham", + }, + "_group": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + { + "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "author": map[string]any{ + "name": "Andrew Lone", + }, + "_group": []map[string]any{ + { + "name": "Go Guide for Rust developers", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This test documents unwanted behaviour, see: +// https://github.com/sourcenetwork/defradb/issues/1654 +func TestQueryOneToOneWithGroupRelatedIDAliasFromSecondary(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation query with group by related id alias (secondary side)", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author: Author + } + + type Author { + name: String + published: Book @primary + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-3d236f89-6a31-5add-a36a-27971a2eac76 + Doc: `{ + "name": "Painted House" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 + Doc: `{ + "name": "Go Guide for Rust developers" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 + Doc: `{ + "name": "John Grisham", + "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c + Doc: `{ + "name": "Andrew Lone", + "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" + }`, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author]) { + author_id + author { + name + } + _group { + name + } + } + }`, + Results: []map[string]any{ + { + "author_id": nil, + "author": map[string]any{ + "name": "Andrew Lone", + }, + "_group": []map[string]any{ + { + "name": "Painted House", + }, + { + "name": "Go Guide for Rust developers", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/one_to_one/with_group_related_id_test.go b/tests/integration/query/one_to_one/with_group_related_id_test.go new file mode 100644 
index 0000000000..50a1111475 --- /dev/null +++ b/tests/integration/query/one_to_one/with_group_related_id_test.go @@ -0,0 +1,176 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_one + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToOneWithGroupRelatedID(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation query with group by related id (primary side)", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author: Author @primary + } + + type Author { + name: String + published: Book + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-3d236f89-6a31-5add-a36a-27971a2eac76 + Doc: `{ + "name": "Painted House" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 + Doc: `{ + "name": "Go Guide for Rust developers" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 + Doc: `{ + "name": "John Grisham", + "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c + Doc: `{ + "name": "Andrew Lone", + "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" + }`, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author_id]) { + author_id + _group { + name + } + } + }`, + Results: []map[string]any{ + { + "author_id": "bae-6b624301-3d0a-5336-bd2c-ca00bca3de85", + "_group": []map[string]any{ + { + "name": "Painted House", + }, + }, + }, + { + "author_id": "bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c", + "_group": []map[string]any{ + { + "name": "Go Guide for Rust developers", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This test documents unwanted behaviour, see: +// https://github.com/sourcenetwork/defradb/issues/1654 +func TestQueryOneToOneWithGroupRelatedIDFromSecondary(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation query with group by related id (secondary side)", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author: Author + } + + type Author { + name: String + published: Book @primary + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-3d236f89-6a31-5add-a36a-27971a2eac76 + Doc: `{ + "name": "Painted House" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6 + Doc: `{ + "name": "Go Guide for Rust developers" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-6b624301-3d0a-5336-bd2c-ca00bca3de85 + Doc: `{ + "name": "John Grisham", + "published_id": "bae-3d236f89-6a31-5add-a36a-27971a2eac76" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // bae-92fa9dcb-c1ee-5b84-b2f6-e9437c7f261c + Doc: `{ + "name": "Andrew Lone", + "published_id": "bae-d6627fea-8bf7-511c-bcf9-bac4212bddd6" + }`, + }, + testUtils.Request{ + Request: `query { + Book(groupBy: [author_id]) { + author_id + _group { + name + } + } + }`, + Results: []map[string]any{ + { + "author_id": nil, + "_group": []map[string]any{ + { + "name": "Painted House", + }, + 
{ + "name": "Go Guide for Rust developers", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/one_to_one/with_id_field_test.go b/tests/integration/query/one_to_one/with_id_field_test.go new file mode 100644 index 0000000000..ecb0929981 --- /dev/null +++ b/tests/integration/query/one_to_one/with_id_field_test.go @@ -0,0 +1,117 @@ +// Copyright 2022 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package one_to_one + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToOneWithIdFieldOnSecondary(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation secondary direction, id field with name clash on secondary side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author_id: Int + author: Author + } + + type Author { + name: String + published: Book @primary + } + `, + }, + testUtils.CreateDoc{ + // bae-d82dbe47-9df1-5e33-bd87-f92e9c378161 + CollectionID: 0, + Doc: `{ + "name": "Painted House", + "author_id": 123456 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "John Grisham", + "published_id": "bae-d82dbe47-9df1-5e33-bd87-f92e9c378161" + }`, + }, + testUtils.Request{ + Request: `query { + Book { + name + author_id + author { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Painted House", + "author_id": uint64(123456), + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This documents unwanted behaviour, see https://github.com/sourcenetwork/defradb/issues/1520 +func TestQueryOneToOneWithIdFieldOnPrimary(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one relation primary direction, id field with name clash on primary side", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + author_id: Int + author: Author @primary + } + + type Author { + name: String + published: Book + } + `, + }, + testUtils.CreateDoc{ + // bae-d82dbe47-9df1-5e33-bd87-f92e9c378161 + CollectionID: 0, + Doc: `{ + "name": "Painted House", + "author_id": 123456 + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + Doc: `{ + "name": "John Grisham", + "published_id": "bae-d82dbe47-9df1-5e33-bd87-f92e9c378161" + }`, + ExpectedError: "value doesn't contain number; it contains string", + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/one_to_one_to_many/simple_test.go b/tests/integration/query/one_to_one_to_many/simple_test.go index 76810a5dc2..184d2eae1b 100644 --- a/tests/integration/query/one_to_one_to_many/simple_test.go +++ b/tests/integration/query/one_to_one_to_many/simple_test.go @@ -88,7 +88,7 @@ func TestQueryOneToOneToMany(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Indicator", "Observable", "Observation"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToManyFromSecondaryOnOneToMany(t *testing.T) { @@ -165,7 +165,7 @@ func TestQueryOneToOneToManyFromSecondaryOnOneToMany(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Indicator", "Observable", 
"Observation"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToManyFromSecondaryOnOneToOne(t *testing.T) { @@ -240,7 +240,7 @@ func TestQueryOneToOneToManyFromSecondaryOnOneToOne(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Indicator", "Observable", "Observation"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToManyFromSecondary(t *testing.T) { @@ -317,5 +317,5 @@ func TestQueryOneToOneToManyFromSecondary(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Indicator", "Observable", "Observation"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_one_to_one/simple_test.go b/tests/integration/query/one_to_one_to_one/simple_test.go index d915b9f4ff..11f75e38f9 100644 --- a/tests/integration/query/one_to_one_to_one/simple_test.go +++ b/tests/integration/query/one_to_one_to_one/simple_test.go @@ -119,7 +119,7 @@ func TestQueryOneToOneToOne(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Publisher", "Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToOneSecondaryThenPrimary(t *testing.T) { @@ -225,7 +225,7 @@ func TestQueryOneToOneToOneSecondaryThenPrimary(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Publisher", "Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToOnePrimaryThenSecondary(t *testing.T) { @@ -331,7 +331,7 @@ func TestQueryOneToOneToOnePrimaryThenSecondary(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Publisher", "Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } func TestQueryOneToOneToOneSecondary(t *testing.T) { @@ -437,5 +437,5 @@ func TestQueryOneToOneToOneSecondary(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Publisher", "Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/query/one_to_one_to_one/with_order_test.go b/tests/integration/query/one_to_one_to_one/with_order_test.go new file mode 100644 index 0000000000..c5da7a19e7 --- /dev/null +++ b/tests/integration/query/one_to_one_to_one/with_order_test.go @@ -0,0 +1,123 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_one_to_one + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryOneToOneToOneWithNestedOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "One-to-one-to-one relation primary direction", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Publisher { + name: String + printed: Book @primary + } + + type Book { + name: String + publisher: Publisher + author: Author @primary + } + + type Author { + name: String + published: Book + } + `, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" + Doc: `{ + "name": "Old Publisher" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 0, + // "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" + Doc: `{ + "name": "New Publisher" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" + Doc: `{ + "name": "Painted House", + "publisher_id": "bae-1f4cc394-08a8-5825-87b9-b02de2f25f7d" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 1, + // "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" + Doc: `{ + "name": "Theif Lord", + "publisher_id": "bae-a3cd6fac-13c0-5c8f-970b-0ce7abbb49a5" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "name": "John Grisham", + "published_id": "bae-a6cdabfc-17dd-5662-b213-c596ee4c3292" + }`, + }, + testUtils.CreateDoc{ + CollectionID: 2, + Doc: `{ + "name": "Cornelia Funke", + "published_id": "bae-bc198c5f-6238-5b50-8072-68dec9c7a16b" + }`, + }, + testUtils.Request{ + Request: `query { + Publisher(order: {printed: {author: {name: ASC}}}) { + name + printed { + name + author { + name + } + } + } + }`, + Results: []map[string]any{ + { + "name": "New Publisher", + "printed": map[string]any{ + "name": "Theif Lord", + "author": map[string]any{ + "name": "Cornelia Funke", + }, + }, + }, + { + "name": "Old Publisher", + "printed": map[string]any{ + "name": "Painted House", + "author": map[string]any{ + "name": "John Grisham", + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/query/simple/with_filter/with_not_test.go b/tests/integration/query/simple/with_filter/with_not_test.go new file mode 100644 index 0000000000..8ec86c15dd --- /dev/null +++ b/tests/integration/query/simple/with_filter/with_not_test.go @@ -0,0 +1,200 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package simple + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQuerySimple_WithNotEqualToXFilter_NoError(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with logical compound filter (not)", + Request: `query { + Users(filter: {_not: {Age: {_eq: 55}}}) { + Name + Age + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "John", + "Age": 21 + }`, + `{ + "Name": "Bob", + "Age": 32 + }`, + `{ + "Name": "Carlo", + "Age": 55 + }`, + `{ + "Name": "Alice", + "Age": 19 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Bob", + "Age": uint64(32), + }, + { + "Name": "Alice", + "Age": uint64(19), + }, + { + "Name": "John", + "Age": uint64(21), + }, + }, + } + + executeTestCase(t, test) +} + +func TestQuerySimple_WithNotEqualToXorYFilter_NoError(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with logical compound filter (not)", + Request: `query { + Users(filter: {_not: {_or: [{Age: {_eq: 55}}, {Name: {_eq: "Alice"}}]}}) { + Name + Age + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "John", + "Age": 21 + }`, + `{ + "Name": "Bob", + "Age": 32 + }`, + `{ + "Name": "Carlo", + "Age": 55 + }`, + `{ + "Name": "Alice", + "Age": 19 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Bob", + "Age": uint64(32), + }, + { + "Name": "John", + "Age": uint64(21), + }, + }, + } + + executeTestCase(t, test) +} + +func TestQuerySimple_WithEmptyNotFilter_ReturnError(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with empty logical compound filter (not) returns empty result set", + Request: `query { + Users(filter: {_not: {}}) { + Name + Age + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "John", + "Age": 21 + }`, + `{ + "Name": "Bob", + "Age": 32 + }`, + `{ + "Name": "Carlo", + "Age": 55 + }`, + `{ + "Name": "Alice", + "Age": 19 + }`, + }, + }, + Results: []map[string]any{}, + } + + executeTestCase(t, test) +} + +func TestQuerySimple_WithNotEqualToXAndNotYFilter_NoError(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with logical compound filter (not)", + Request: `query { + Users(filter: {_not: {Age: {_eq: 55}, _not: {Name: {_eq: "Carlo"}}}}) { + Name + Age + } + }`, + Docs: map[int][]string{ + 0: { + `{ + "Name": "John", + "Age": 21 + }`, + `{ + "Name": "Bob", + "Age": 32 + }`, + `{ + "Name": "Carlo", + "Age": 55 + }`, + `{ + "Name": "Alice", + "Age": 19 + }`, + `{ + "Name": "Frank", + "Age": 55 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Bob", + "Age": uint64(32), + }, + { + "Name": "Alice", + "Age": uint64(19), + }, + { + "Name": "John", + "Age": uint64(21), + }, + { + "Name": "Carlo", + "Age": uint64(55), + }, + }, + } + + executeTestCase(t, test) +} diff --git a/tests/integration/query/simple/with_group_test.go b/tests/integration/query/simple/with_group_test.go index 5f19880081..c926f580a6 100644 --- a/tests/integration/query/simple/with_group_test.go +++ b/tests/integration/query/simple/with_group_test.go @@ -208,6 +208,71 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { executeTestCase(t, test) } +func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *testing.T) { + test := testUtils.RequestTestCase{ + Description: "Simple query with groupBy without selecting field grouped by, with inner _group.", + Request: `query { + Users(groupBy: [Name]) { + Name + _group { + Age + } + } + }`, + Docs: map[int][]string{ + 0: { + `{ 
+ "Name": "John", + "Age": 25 + }`, + `{ + "Name": "John", + "Age": 32 + }`, + `{ + "Name": "Carlo", + "Age": 55 + }`, + `{ + "Name": "Alice", + "Age": 19 + }`, + }, + }, + Results: []map[string]any{ + { + "Name": "Alice", + "_group": []map[string]any{ + { + "Age": uint64(19), + }, + }, + }, + { + "Name": "John", + "_group": []map[string]any{ + { + "Age": uint64(32), + }, + { + "Age": uint64(25), + }, + }, + }, + { + "Name": "Carlo", + "_group": []map[string]any{ + { + "Age": uint64(55), + }, + }, + }, + }, + } + + executeTestCase(t, test) +} + func TestQuerySimpleWithGroupByString(t *testing.T) { test := testUtils.RequestTestCase{ Description: "Simple query with group by string", diff --git a/tests/integration/query/simple/with_restart_test.go b/tests/integration/query/simple/with_restart_test.go index 67e4f07f87..34b906a4bc 100644 --- a/tests/integration/query/simple/with_restart_test.go +++ b/tests/integration/query/simple/with_restart_test.go @@ -53,5 +53,5 @@ func TestQuerySimpleWithRestart(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/aggregates/inline_array_test.go b/tests/integration/schema/aggregates/inline_array_test.go index 3642731549..f5c6199e39 100644 --- a/tests/integration/schema/aggregates/inline_array_test.go +++ b/tests/integration/schema/aggregates/inline_array_test.go @@ -138,7 +138,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersCount(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } */ @@ -261,7 +261,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersSum(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } /* WIP @@ -384,7 +384,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersAverage(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } */ @@ -589,7 +589,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableBooleanCountFilter(t *tes }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersBooleanCountFilter(t *testing.T) { @@ -715,7 +715,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersBooleanCountFilter(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersNillableIntegerCountFilter(t *testing.T) { @@ -865,7 +865,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableIntegerCountFilter(t *tes }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersIntegerCountFilter(t *testing.T) { @@ -1015,7 +1015,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersIntegerCountFilter(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersNillableFloatCountFilter(t *testing.T) { @@ -1165,7 +1165,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableFloatCountFilter(t *testi }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersFloatCountFilter(t *testing.T) { @@ -1315,7 +1315,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersFloatCountFilter(t *testing.T) { }, } - 
testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersNillableStringCountFilter(t *testing.T) { @@ -1453,7 +1453,7 @@ func TestSchemaAggregateInlineArrayCreatesUsersNillableStringCountFilter(t *test }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateInlineArrayCreatesUsersStringCountFilter(t *testing.T) { @@ -1591,5 +1591,5 @@ func TestSchemaAggregateInlineArrayCreatesUsersStringCountFilter(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/aggregates/simple_test.go b/tests/integration/schema/aggregates/simple_test.go index 29399df05b..ef9eef19a6 100644 --- a/tests/integration/schema/aggregates/simple_test.go +++ b/tests/integration/schema/aggregates/simple_test.go @@ -109,7 +109,7 @@ func TestSchemaAggregateSimpleCreatesUsersCount(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateSimpleCreatesUsersSum(t *testing.T) { @@ -213,7 +213,7 @@ func TestSchemaAggregateSimpleCreatesUsersSum(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateSimpleCreatesUsersAverage(t *testing.T) { @@ -317,5 +317,5 @@ func TestSchemaAggregateSimpleCreatesUsersAverage(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/aggregates/top_level_test.go b/tests/integration/schema/aggregates/top_level_test.go index 5f84163d2d..5dd10c0f07 100644 --- a/tests/integration/schema/aggregates/top_level_test.go +++ b/tests/integration/schema/aggregates/top_level_test.go @@ -91,7 +91,7 @@ func TestSchemaAggregateTopLevelCreatesCountGivenSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateTopLevelCreatesSumGivenSchema(t *testing.T) { @@ -197,7 +197,7 @@ func TestSchemaAggregateTopLevelCreatesSumGivenSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaAggregateTopLevelCreatesAverageGivenSchema(t *testing.T) { @@ -303,5 +303,5 @@ func TestSchemaAggregateTopLevelCreatesAverageGivenSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/client_introspection/one_many_test.go b/tests/integration/schema/client_introspection/one_many_test.go index 45c8c392eb..ae8d5dcfc3 100644 --- a/tests/integration/schema/client_introspection/one_many_test.go +++ b/tests/integration/schema/client_introspection/one_many_test.go @@ -43,5 +43,5 @@ func TestClientIntrospectionWithOneToManySchema(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/client_introspection/simple_test.go b/tests/integration/schema/client_introspection/simple_test.go index 5576cd91d0..ba9c8472f3 100644 --- a/tests/integration/schema/client_introspection/simple_test.go +++ b/tests/integration/schema/client_introspection/simple_test.go @@ -28,5 +28,5 @@ func TestClientIntrospectionBasic(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{}, test) 
+ testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/client_test.go b/tests/integration/schema/client_test.go index 8fdaf3373d..6c603b9b71 100644 --- a/tests/integration/schema/client_test.go +++ b/tests/integration/schema/client_test.go @@ -49,5 +49,5 @@ func TestIntrospectionExplainTypeDefined(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/default_fields.go b/tests/integration/schema/default_fields.go index a598cd3603..5b3a75de80 100644 --- a/tests/integration/schema/default_fields.go +++ b/tests/integration/schema/default_fields.go @@ -218,7 +218,7 @@ func buildFilterArg(objectName string, fields []argDef) Field { "name": filterArgName, }), makeInputObject("_key", "IDOperatorBlock", nil), - makeInputObject("_not", "authorFilterArg", nil), + makeInputObject("_not", filterArgName, nil), makeInputObject("_or", nil, map[string]any{ "kind": "INPUT_OBJECT", "name": filterArgName, diff --git a/tests/integration/schema/filter_test.go b/tests/integration/schema/filter_test.go index ff1acc4139..17f38408bf 100644 --- a/tests/integration/schema/filter_test.go +++ b/tests/integration/schema/filter_test.go @@ -118,7 +118,7 @@ func TestFilterForSimpleSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } var testFilterForSimpleSchemaArgProps = map[string]any{ @@ -270,7 +270,7 @@ func TestFilterForOneToOneSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Book", "Author"}, test) + testUtils.ExecuteTestCase(t, test) } var testFilterForOneToOneSchemaArgProps = map[string]any{ diff --git a/tests/integration/schema/group_test.go b/tests/integration/schema/group_test.go new file mode 100644 index 0000000000..35a5171c73 --- /dev/null +++ b/tests/integration/schema/group_test.go @@ -0,0 +1,142 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schema + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestGroupByFieldForTheManySideInSchema(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Test the fields for the many side groupBy are generated.", + + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.IntrospectionRequest{ + Request: ` + { + __type(name: "BookFields") { + name + kind + enumValues { + name + } + } + } + `, + ContainsData: map[string]any{ + "__type": map[string]any{ + "kind": "ENUM", + "name": "BookFields", + "enumValues": []any{ + // Internal related object fields. + map[string]any{"name": "author"}, + map[string]any{"name": "author_id"}, + + // Internal fields. 
+ map[string]any{"name": "_deleted"}, + map[string]any{"name": "_group"}, + map[string]any{"name": "_key"}, + map[string]any{"name": "_version"}, + + // User defined schema fields> + map[string]any{"name": "name"}, + map[string]any{"name": "rating"}, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGroupByFieldForTheSingleSideInSchema(t *testing.T) { + test := testUtils.TestCase{ + + Description: "Test the fields for the single side groupBy are generated.", + + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Book { + name: String + rating: Float + author: Author + } + + type Author { + name: String + age: Int + verified: Boolean + published: [Book] + } + `, + }, + testUtils.IntrospectionRequest{ + Request: ` + { + __type(name: "AuthorFields") { + name + kind + enumValues { + name + } + } + } + `, + ContainsData: map[string]any{ + "__type": map[string]any{ + "kind": "ENUM", + "name": "AuthorFields", + "enumValues": []any{ + // Internal related object fields. + map[string]any{"name": "published"}, + // Note: No `published_id` of this side. + + // Internal fields. + map[string]any{"name": "_deleted"}, + map[string]any{"name": "_group"}, + map[string]any{"name": "_key"}, + map[string]any{"name": "_version"}, + + // User defined schema fields> + map[string]any{"name": "name"}, + map[string]any{"name": "age"}, + map[string]any{"name": "verified"}, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/input_type_test.go b/tests/integration/schema/input_type_test.go index 66beb825da..e50920dc3b 100644 --- a/tests/integration/schema/input_type_test.go +++ b/tests/integration/schema/input_type_test.go @@ -16,6 +16,109 @@ import ( testUtils "github.com/sourcenetwork/defradb/tests/integration" ) +func TestInputTypeOfOrderFieldWhereSchemaHasManyRelationType(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type user { + age: Int + name: String + points: Float + verified: Boolean + group: group + } + + type group { + members: [user] + } + `, + }, + testUtils.IntrospectionRequest{ + Request: ` + query { + __type (name: "group") { + name + fields { + name + args { + name + type { + name + ofType { + name + kind + } + inputFields { + name + type { + name + ofType { + name + kind + } + } + } + } + } + } + } + } + `, + ContainsData: map[string]any{ + "__type": map[string]any{ + "name": "group", + "fields": []any{ + map[string]any{ + // Asserting only on group, because it is the field that contains `order` info we are + // looking for, additionally wanted to reduce the noise of other elements that were getting + // dumped out which made the entire output horrible. 
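+ // The `order` argument asserted on `_group` below is the generated
+ // `groupOrderArg` input; in a request it could be supplied in a form like
+ // `_group(order: {_key: ASC})` (illustrative only, not asserted here).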
+ "name": "_group", + "args": append( + trimFields( + fields{ + dockeyArg, + dockeysArg, + buildFilterArg("group", []argDef{ + { + fieldName: "members", + typeName: "userFilterArg", + }, + }), + groupByArg, + limitArg, + offsetArg, + }, + testInputTypeOfOrderFieldWhereSchemaHasRelationTypeArgProps, + ), + map[string]any{ + "name": "order", + "type": map[string]any{ + "name": "groupOrderArg", + "ofType": nil, + "inputFields": []any{ + map[string]any{ + "name": "_key", + "type": map[string]any{ + "name": "Ordering", + "ofType": nil, + }, + }, + }, + }, + }, + ).Tidy(), + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + func TestInputTypeOfOrderFieldWhereSchemaHasRelationType(t *testing.T) { test := testUtils.TestCase{ Actions: []any{ @@ -138,7 +241,7 @@ func TestInputTypeOfOrderFieldWhereSchemaHasRelationType(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"book", "author"}, test) + testUtils.ExecuteTestCase(t, test) } var testInputTypeOfOrderFieldWhereSchemaHasRelationTypeArgProps = map[string]any{ diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go new file mode 100644 index 0000000000..56f94b2e6b --- /dev/null +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -0,0 +1,910 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQuery(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple documents", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Islam" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Shahzad" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", 
"path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Islam", + "verified": true, + }, + { + "name": "Fred", + "verified": true, + }, + { + "name": "Shahzad", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// Users may want to register migrations before the schema is locally updated. This may be particularly useful +// for downgrading documents recieved via P2P. +func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration set before schema updated", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, to intermediary version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.ConfigureMigration{ + // Register a migration from schema version 1 to schema version 2 **only** - + // there should be no migration from version 2 to version 3. 
+ LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, from intermediary version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.ConfigureMigration{ + // Register a migration from schema version 2 to schema version 3 **only** - + // there should be no migration from version 1 to version 2. + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, across multiple migrated versions", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: 
map[string]any{ + "dst": "email", + "value": "ilovewasm@source.com", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": "ilovewasm@source.com", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This test is important as it tests that orphan migrations do not block the fetcher(s) +// from functioning. +// +// It is important to allow these orphans to be persisted as they may later become linked to the +// schema version history chain as either new migrations are added or the local schema is updated +// bridging the gap. +func TestSchemaMigrationQueryWithUnknownSchemaMigration(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "not a schema version", + DestinationSchemaVersionID: "also not a schema version", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration mutating existing scalar field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + // This may appear to be an odd thing to do, but it is just a simplification. + // Existing fields may be mutated by migrations, and that is what we are testing + // here. + Arguments: map[string]any{ + "dst": "name", + "value": "Fred", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration mutating existing inline-array field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + mobile: [Int!] 
+ } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "mobile": [644, 832, 8325] + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreic427cayffkscmp2ng224wpmsryzwz5aec6dhbfr2xoljb4xbugji", + DestinationSchemaVersionID: "bafkreidrmuahiz4qenylm247udlro732ip3adwv3dqpeds3s2kghwtfvt4", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + // This may appear to be an odd thing to do, but it is just a simplification. + // Existing fields may be mutated by migrations, and that is what we are testing + // here. + Arguments: map[string]any{ + "dst": "mobile", + "value": []int{847, 723, 2012}, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + mobile + } + }`, + Results: []map[string]any{ + { + "mobile": []int64{847, 723, 2012}, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration removing existing field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "age": 40 + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.RemoveModulePath, + Arguments: map[string]any{ + "target": "age", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "John", + "age": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequested(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration preserves existing field without requesting it", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "age": 40 + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "name", + "value": "Fred", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + age + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "age": uint64(40), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, 
test) +} + +func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequested(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration copies existing field without requesting src", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "age": 40 + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "age", + "dst": "yearsLived", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + yearsLived + } + }`, + Results: []map[string]any{ + { + "name": "John", + "yearsLived": uint64(40), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRequested(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, migration copies existing field without requesting src or dst", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + age: Int + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "age": 40 + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.CopyModulePath, + Arguments: map[string]any{ + "src": "age", + "dst": "yearsLived", + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + age + yearsLived + } + }`, + Results: []map[string]any{ + { + "name": "John", + "age": uint64(40), + "yearsLived": uint64(40), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_dockey_test.go b/tests/integration/schema/migrations/query/with_dockey_test.go new file mode 100644 index 0000000000..db58c9f066 --- /dev/null +++ b/tests/integration/schema/migrations/query/with_dockey_test.go @@ -0,0 +1,264 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +// This test asserts that spans are being passed correctly through the new Lens fetcher. +func TestSchemaMigrationQueryByDocKey(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, query by key", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + // bae-d7546ac1-c133-5853-b866-9b9f926fe7e5 + Doc: `{ + "name": "Shahzad" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Shahzad", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +// This test asserts that lenses are being correctly returned to the pool for reuse after +// fetch completion. Querying by dockey should mean that the fetcher only scans the dockey +// prefix, and thus will only migrate a single document per query (unlike filters etc which +// will migrate all documents at the time of writing). If the return mechanic was very faulty +// then this test *should* deadlock. +// +// This behaviour should be covered more in-depth by unit tests, as it would be particularly +// bad if it broke and is fairly encumbersome to fully test via our current integration test +// framework. +// +// At the time of writing, the lens pool size is hardcoded to 5, so we should test with 6 +// documents/queries, if the size changes so should this test. +func TestSchemaMigrationQueryMultipleQueriesByDocKey(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple queries by key", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + // We want 6 documents, and 6 queries, as lens pool is limited to 5 + // and we want to make sure that lenses are being correctly returned + // to the pool for reuse after. 
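+ // Each of the six documents below is queried individually by its dockey
+ // further down; with a pool of five lenses the sixth query can only proceed
+ // if earlier lenses have been returned, otherwise this test would deadlock.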
+ testUtils.CreateDoc{ + // bae-d7546ac1-c133-5853-b866-9b9f926fe7e5 + Doc: `{ + "name": "Shahzad" + }`, + }, + testUtils.CreateDoc{ + // bae-92393ad0-07b6-5753-8dbb-19c9c41374ed + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.CreateDoc{ + // bae-403d7337-f73e-5c81-8719-e853938c8985 + Doc: `{ + "name": "Chris" + }`, + }, + testUtils.CreateDoc{ + // bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad + Doc: `{ + "name": "John" + }`, + }, + testUtils.CreateDoc{ + // bae-3f1174ba-d9bc-5a6a-b0bc-8f19581f199d + Doc: `{ + "name": "Islam" + }`, + }, + testUtils.CreateDoc{ + // bae-0698bda7-2c69-5028-a26a-0a1c491b793b + Doc: `{ + "name": "Dave" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-d7546ac1-c133-5853-b866-9b9f926fe7e5") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Shahzad", + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-92393ad0-07b6-5753-8dbb-19c9c41374ed") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-403d7337-f73e-5c81-8719-e853938c8985") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Chris", + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-3f1174ba-d9bc-5a6a-b0bc-8f19581f199d") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Islam", + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users (dockey: "bae-0698bda7-2c69-5028-a26a-0a1c491b793b") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Dave", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go new file mode 100644 index 0000000000..d71ccbc51a --- /dev/null +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -0,0 +1,111 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
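+
+// These tests cover migrations in combination with P2P replication: the schema
+// patch is applied to one node only while the migration is registered on both,
+// and the assertions document the current behaviour for documents synced from
+// the un-patched node.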
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // Patch node 1 only + NodeID: immutable.Some(1), + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + // Register the migration on both nodes. + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.CreateDoc{ + // Create John on the first (source) node only, and allow the value to sync + NodeID: immutable.Some(0), + Doc: `{ + "name": "John" + }`, + }, + testUtils.WaitForSync{}, + testUtils.Request{ + // Node 0 should yield results as they were defined, as the newer schema version is + // unknown to this node. + NodeID: immutable.Some(0), + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{ + { + "name": "John", + }, + }, + }, + testUtils.Request{ + // Node 1 should yield results migrated to the new schema version. + NodeID: immutable.Some(1), + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // todo: The migration has not been run as P2P assumes it is being synced at the latest local version + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go new file mode 100644 index 0000000000..2c1253bfd0 --- /dev/null +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -0,0 +1,82 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
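+
+// These tests check that a configured migration survives a node restart and is
+// still applied to queries made after the restart.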
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQueryWithRestart(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with restart", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Restart{}, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go new file mode 100644 index 0000000000..059af4d461 --- /dev/null +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -0,0 +1,153 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
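+
+// These tests document transaction behaviour around migrations: a migration
+// configured within a transaction is not visible to a request on that same
+// transaction (a known issue, see the todo below), but is visible once the
+// transaction has been committed.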
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +// todo: This test documents unwanted behaviour and should be fixed with +// https://github.com/sourcenetwork/defradb/issues/1592 +func TestSchemaMigrationQueryWithTxn(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with transaction", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + TransactionID: immutable.Some(0), + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + TransactionID: immutable.Some(0), + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // This is the bug - although the request and migration are on the same transaction + // the migration is not picked up during the request. + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + TransactionID: immutable.Some(0), + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.TransactionCommit{ + TransactionID: 0, + }, + testUtils.Request{ + TransactionID: immutable.Some(1), + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go new file mode 100644 index 0000000000..35c6965ead --- /dev/null +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -0,0 +1,167 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with update request", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `mutation { + update_Users(data: "{\"name\":\"Johnnnn\"}") { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Johnnnn", + // We need to assert that the migration has been run within the context + // of the update + "verified": true, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Johnnnn", + // We need to assert that the effects of the migration executed within the + // update have been persisted + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with migration registered after update", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.UpdateDoc{ + // Update the document **before** registering the migration + Doc: `{ + "name": "Johnnnn" + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "Johnnnn", + // As the document was updated before the migration was registered + // the migration will not have been run + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go new file mode 
100644 index 0000000000..b63be03b5a --- /dev/null +++ b/tests/integration/schema/migrations/simple_test.go @@ -0,0 +1,211 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrations + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +// Migrations need to be able to be registered for unknown schema ids, so they +// may migrate to/from them if recieved by the P2P system. +func TestSchemaMigrationDoesNotErrorGivenUnknownSchemaIDs(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, unknown schema ids", + Actions: []any{ + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "does not exist", + DestinationSchemaVersionID: "also does not exist", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + }, + testUtils.GetMigrations{ + ExpectedResults: []client.LensConfig{ + { + SourceSchemaVersionID: "does not exist", + DestinationSchemaVersionID: "also does not exist", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple migrations", + Actions: []any{ + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "does not exist", + DestinationSchemaVersionID: "also does not exist", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.GetMigrations{ + ExpectedResults: []client.LensConfig{ + { + SourceSchemaVersionID: "does not exist", + DestinationSchemaVersionID: "also does not exist", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + { + SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + 
}, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationReplacesExistingMigationBasedOnSourceID(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, replace migration", + Actions: []any{ + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "a", + DestinationSchemaVersionID: "b", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + // Replace the original migration with a new configuration + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "a", + DestinationSchemaVersionID: "c", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "age", + "value": 123, + }, + }, + }, + }, + }, + }, + testUtils.GetMigrations{ + ExpectedResults: []client.LensConfig{ + { + SourceSchemaVersionID: "a", + DestinationSchemaVersionID: "c", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "age", + "value": 123, + }, + }, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/with_txn_test.go b/tests/integration/schema/migrations/with_txn_test.go new file mode 100644 index 0000000000..f8eb5b5611 --- /dev/null +++ b/tests/integration/schema/migrations/with_txn_test.go @@ -0,0 +1,58 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package migrations + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +// todo: This test documents unwanted behaviour and should be fixed with +// https://github.com/sourcenetwork/defradb/issues/1592 +func TestSchemaMigrationGetMigrationsWithTxn(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, with txn", + Actions: []any{ + testUtils.ConfigureMigration{ + TransactionID: immutable.Some(0), + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "does not exist", + DestinationSchemaVersionID: "also does not exist", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, + }, + }, + }, + testUtils.GetMigrations{ + TransactionID: immutable.Some(0), + // This is the bug - although the GetMigrations call and migration are on the same transaction + // the migration is not returned in the results. 
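+ // The empty result set pins the current (incorrect) behaviour until the
+ // linked issue is resolved.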
+ ExpectedResults: []client.LensConfig{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/relations_test.go b/tests/integration/schema/relations_test.go new file mode 100644 index 0000000000..9af43b2095 --- /dev/null +++ b/tests/integration/schema/relations_test.go @@ -0,0 +1,182 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schema + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchemaRelationOneToOne(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Dog { + name: String + user: User + } + type User { + dog: Dog + } + `, + }, + testUtils.IntrospectionRequest{ + Request: ` + query { + __type (name: "User") { + name + fields { + name + type { + name + kind + } + } + } + } + `, + ExpectedData: map[string]any{ + "__type": map[string]any{ + "name": "User", + "fields": append(DefaultFields, + Field{ + "name": "dog", + "type": map[string]any{ + "kind": "OBJECT", + "name": "Dog", + }, + }, + Field{ + "name": "dog_id", + "type": map[string]any{ + "kind": "SCALAR", + "name": "ID", + }, + }, + ).Tidy(), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaRelationManyToOne(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Dog { + name: String + user: User + } + type User { + dogs: [Dog] + } + `, + }, + testUtils.IntrospectionRequest{ + Request: ` + query { + __type (name: "User") { + name + fields { + name + type { + name + kind + } + } + } + } + `, + ExpectedData: map[string]any{ + "__type": map[string]any{ + "name": "User", + "fields": append(DefaultFields, + Field{ + "name": "dogs", + "type": map[string]any{ + "kind": "LIST", + "name": nil, + }, + }, + ).Tidy(), + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaRelationErrorsGivenOneSidedManyRelationField(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Dog { + name: String + } + type User { + dogs: [Dog] + } + `, + ExpectedError: "relation must be defined on both schemas. Type: Dog", + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaRelationErrorsGivenOneSidedRelationField(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Dog { + name: String + } + type User { + dog: Dog + } + `, + ExpectedError: "relation must be defined on both schemas. Type: Dog", + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaRelation_GivenSelfReferemceRelationField_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Dog { + name: String + bestMate: Dog + } + `, + ExpectedError: "relation must be defined on both schemas. 
Type: Dog", + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/simple_test.go b/tests/integration/schema/simple_test.go index e6d46ae7e5..c90fee99e0 100644 --- a/tests/integration/schema/simple_test.go +++ b/tests/integration/schema/simple_test.go @@ -41,7 +41,7 @@ func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleErrorsGivenDuplicateSchema(t *testing.T) { @@ -62,7 +62,7 @@ func TestSchemaSimpleErrorsGivenDuplicateSchema(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleErrorsGivenDuplicateSchemaInSameSDL(t *testing.T) { @@ -78,7 +78,7 @@ func TestSchemaSimpleErrorsGivenDuplicateSchemaInSameSDL(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleCreatesSchemaGivenNewTypes(t *testing.T) { @@ -111,7 +111,7 @@ func TestSchemaSimpleCreatesSchemaGivenNewTypes(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Books"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleCreatesSchemaWithDefaultFieldsGivenEmptyType(t *testing.T) { @@ -147,7 +147,7 @@ func TestSchemaSimpleCreatesSchemaWithDefaultFieldsGivenEmptyType(t *testing.T) }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleErrorsGivenTypeWithInvalidFieldType(t *testing.T) { @@ -159,12 +159,12 @@ func TestSchemaSimpleErrorsGivenTypeWithInvalidFieldType(t *testing.T) { name: NotAType } `, - ExpectedError: "no type found for given name", + ExpectedError: "relation must be defined on both schemas. 
Type: NotAType", }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleCreatesSchemaGivenTypeWithStringField(t *testing.T) { @@ -210,7 +210,7 @@ func TestSchemaSimpleCreatesSchemaGivenTypeWithStringField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleErrorsGivenNonNullField(t *testing.T) { @@ -227,7 +227,7 @@ func TestSchemaSimpleErrorsGivenNonNullField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaSimpleErrorsGivenNonNullManyRelationField(t *testing.T) { @@ -248,5 +248,5 @@ func TestSchemaSimpleErrorsGivenNonNullManyRelationField(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Dogs", "Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/crdt/composite_test.go b/tests/integration/schema/updates/add/field/crdt/composite_test.go index c708bb8b53..1ad0dc06d2 100644 --- a/tests/integration/schema/updates/add/field/crdt/composite_test.go +++ b/tests/integration/schema/updates/add/field/crdt/composite_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldCRDTCompositeErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/crdt/invalid_test.go b/tests/integration/schema/updates/add/field/crdt/invalid_test.go index 1aaaa22945..0c899155fb 100644 --- a/tests/integration/schema/updates/add/field/crdt/invalid_test.go +++ b/tests/integration/schema/updates/add/field/crdt/invalid_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldCRDTInvalidErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/crdt/lww_test.go b/tests/integration/schema/updates/add/field/crdt/lww_test.go index a11c37205b..c8a4b93007 100644 --- a/tests/integration/schema/updates/add/field/crdt/lww_test.go +++ b/tests/integration/schema/updates/add/field/crdt/lww_test.go @@ -45,5 +45,5 @@ func TestSchemaUpdatesAddFieldCRDTLWW(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/crdt/none_test.go b/tests/integration/schema/updates/add/field/crdt/none_test.go index fc9f6e462a..2ed83e3898 100644 --- a/tests/integration/schema/updates/add/field/crdt/none_test.go +++ b/tests/integration/schema/updates/add/field/crdt/none_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldCRDTDefault(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldCRDTNone(t *testing.T) { @@ -77,5 +77,5 @@ func TestSchemaUpdatesAddFieldCRDTNone(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go index b8c8e5438a..5d87c8a57e 100644 --- a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go +++ b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldCRDTObjectWithBoolFieldErrors(t *testing.T) { }, }, 
} - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/create_test.go b/tests/integration/schema/updates/add/field/create_test.go index ffe9f7ab07..a6a14f2142 100644 --- a/tests/integration/schema/updates/add/field/create_test.go +++ b/tests/integration/schema/updates/add/field/create_test.go @@ -58,7 +58,7 @@ func TestSchemaUpdatesAddFieldWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldWithCreateAfterSchemaUpdate(t *testing.T) { @@ -118,5 +118,5 @@ func TestSchemaUpdatesAddFieldWithCreateAfterSchemaUpdate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index 378a16dc4a..1722531568 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -101,7 +101,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { @@ -157,5 +157,5 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuer }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/bool_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_array_test.go index b05448daa9..ee8d53644e 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_array_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindBoolArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindBoolArrayWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindBoolArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindBoolArraySubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindBoolArraySubstitutionWithCreate(t *testing.T) }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go index dfeaa015a7..e0c664127b 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go @@ -47,7 +47,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableBoolArrayWithCreate(t *testing.T) { @@ -91,7 +91,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func 
TestSchemaUpdatesAddFieldKindNillableBoolArraySubstitutionWithCreate(t *testing.T) { @@ -135,5 +135,5 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArraySubstitutionWithCreate(t *tes }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/bool_test.go b/tests/integration/schema/updates/add/field/kind/bool_test.go index 9155101f10..7be3801bc3 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindBool(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindBoolWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindBoolWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindBoolSubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindBoolSubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/datetime_test.go b/tests/integration/schema/updates/add/field/kind/datetime_test.go index 051fd6ed1b..5363864c47 100644 --- a/tests/integration/schema/updates/add/field/kind/datetime_test.go +++ b/tests/integration/schema/updates/add/field/kind/datetime_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindDateTime(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindDateTimeWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindDateTimeWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindDateTimeSubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindDateTimeSubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/dockey_test.go b/tests/integration/schema/updates/add/field/kind/dockey_test.go index 942de4ef58..f4d5d9aabe 100644 --- a/tests/integration/schema/updates/add/field/kind/dockey_test.go +++ b/tests/integration/schema/updates/add/field/kind/dockey_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindDocKey(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindDocKeyWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindDocKeyWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindDocKeySubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindDocKeySubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/float_array_test.go b/tests/integration/schema/updates/add/field/kind/float_array_test.go index 
3e5a6c2270..86e8ddd882 100644 --- a/tests/integration/schema/updates/add/field/kind/float_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_array_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindFloatArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindFloatArrayWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindFloatArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindFloatArraySubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindFloatArraySubstitutionWithCreate(t *testing.T) }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go index c15a6b9466..4cb1bb8133 100644 --- a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go @@ -47,7 +47,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableFloatArrayWithCreate(t *testing.T) { @@ -95,7 +95,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableFloatArraySubstitutionWithCreate(t *testing.T) { @@ -143,5 +143,5 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArraySubstitutionWithCreate(t *te }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/float_test.go b/tests/integration/schema/updates/add/field/kind/float_test.go index cf25669bb3..9411a4e7d1 100644 --- a/tests/integration/schema/updates/add/field/kind/float_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindFloat(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindFloatWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindFloatWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindFloatSubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindFloatSubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go index 511c89b499..6d96324f3c 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, 
[]string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go index eafb407bc9..76dd134982 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldKindForeignObject(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/int_array_test.go b/tests/integration/schema/updates/add/field/kind/int_array_test.go index 2e79a04adb..4e7c732ec1 100644 --- a/tests/integration/schema/updates/add/field/kind/int_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_array_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindIntArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindIntArrayWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindIntArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindIntArraySubstitutionWithCreate(t *testing.T) { @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntArraySubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Integer!]"} } + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int!]"} } ] `, }, @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindIntArraySubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go index eb2050d9c1..0642ffa894 100644 --- a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go @@ -47,7 +47,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableIntArrayWithCreate(t *testing.T) { @@ -95,7 +95,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableIntArraySubstitutionWithCreate(t *testing.T) { @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArraySubstitutionWithCreate(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Integer]"} } + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int]"} } ] `, }, @@ -143,5 +143,5 @@ func TestSchemaUpdatesAddFieldKindNillableIntArraySubstitutionWithCreate(t *test }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/int_test.go 
b/tests/integration/schema/updates/add/field/kind/int_test.go index 41ec4e6794..3e12ed9106 100644 --- a/tests/integration/schema/updates/add/field/kind/int_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindInt(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindIntWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindIntWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Integer"} } + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Int"} } ] `, }, @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/invalid_test.go b/tests/integration/schema/updates/add/field/kind/invalid_test.go index 92985ed7d7..fa7556a86b 100644 --- a/tests/integration/schema/updates/add/field/kind/invalid_test.go +++ b/tests/integration/schema/updates/add/field/kind/invalid_test.go @@ -37,7 +37,7 @@ func TestSchemaUpdatesAddFieldKind8(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKind9(t *testing.T) { @@ -61,7 +61,7 @@ func TestSchemaUpdatesAddFieldKind9(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKind13(t *testing.T) { @@ -85,7 +85,7 @@ func TestSchemaUpdatesAddFieldKind13(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKind14(t *testing.T) { @@ -109,7 +109,7 @@ func TestSchemaUpdatesAddFieldKind14(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKind15(t *testing.T) { @@ -133,7 +133,7 @@ func TestSchemaUpdatesAddFieldKind15(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This test is currently the first unsupported value, if it becomes supported @@ -159,7 +159,7 @@ func TestSchemaUpdatesAddFieldKind22(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // Tests a semi-random but hardcoded unsupported kind to try and protect against anything odd permitting @@ -185,7 +185,7 @@ func TestSchemaUpdatesAddFieldKind198(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindInvalidSubstitution(t *testing.T) { @@ -209,5 +209,5 @@ func TestSchemaUpdatesAddFieldKindInvalidSubstitution(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/none_test.go 
b/tests/integration/schema/updates/add/field/kind/none_test.go index aa52f9c098..7e8c44dc73 100644 --- a/tests/integration/schema/updates/add/field/kind/none_test.go +++ b/tests/integration/schema/updates/add/field/kind/none_test.go @@ -37,5 +37,5 @@ func TestSchemaUpdatesAddFieldKindNone(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/string_array_test.go b/tests/integration/schema/updates/add/field/kind/string_array_test.go index 9d0c05124e..d3e03c8b35 100644 --- a/tests/integration/schema/updates/add/field/kind/string_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_array_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindStringArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindStringArrayWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindStringArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindStringArraySubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindStringArraySubstitutionWithCreate(t *testing.T }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go index d3f9ba76f0..c34fe22aba 100644 --- a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go @@ -47,7 +47,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArray(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableStringArrayWithCreate(t *testing.T) { @@ -95,7 +95,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArrayWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindNillableStringArraySubstitutionWithCreate(t *testing.T) { @@ -143,5 +143,5 @@ func TestSchemaUpdatesAddFieldKindNillableStringArraySubstitutionWithCreate(t *t }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/kind/string_test.go b/tests/integration/schema/updates/add/field/kind/string_test.go index 1801a76909..f32f9409c4 100644 --- a/tests/integration/schema/updates/add/field/kind/string_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldKindString(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindStringWithCreate(t *testing.T) { @@ -89,7 +89,7 @@ func TestSchemaUpdatesAddFieldKindStringWithCreate(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldKindStringSubstitutionWithCreate(t *testing.T) { @@ -133,5 +133,5 @@ func TestSchemaUpdatesAddFieldKindStringSubstitutionWithCreate(t *testing.T) { }, }, } - 
testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index e1094e3279..d64f9e3bbe 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldSimpleErrorsAddingToUnknownCollection(t *testing.T) { @@ -77,7 +77,7 @@ func TestSchemaUpdatesAddFieldSimpleErrorsAddingToUnknownCollection(t *testing.T }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldMultipleInPatch(t *testing.T) { @@ -111,7 +111,7 @@ func TestSchemaUpdatesAddFieldMultipleInPatch(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldMultiplePatches(t *testing.T) { @@ -151,7 +151,7 @@ func TestSchemaUpdatesAddFieldMultiplePatches(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldSimpleWithoutName(t *testing.T) { @@ -175,7 +175,7 @@ func TestSchemaUpdatesAddFieldSimpleWithoutName(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldMultipleInPatchPartialSuccess(t *testing.T) { @@ -220,7 +220,7 @@ func TestSchemaUpdatesAddFieldMultipleInPatchPartialSuccess(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldSimpleDuplicateOfExistingField(t *testing.T) { @@ -244,7 +244,7 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateOfExistingField(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldSimpleDuplicateField(t *testing.T) { @@ -269,7 +269,7 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateField(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldWithExplicitIDErrors(t *testing.T) { @@ -293,5 +293,5 @@ func TestSchemaUpdatesAddFieldWithExplicitIDErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/with_filter_test.go b/tests/integration/schema/updates/add/field/with_filter_test.go index e5c21e3c9e..decdb7b997 100644 --- a/tests/integration/schema/updates/add/field/with_filter_test.go +++ b/tests/integration/schema/updates/add/field/with_filter_test.go @@ -44,7 +44,7 @@ func TestSchemaUpdatesAddFieldSimpleWithFilter(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldSimpleWithFilterOnPopulatedDatabase(t *testing.T) { @@ -88,5 +88,5 @@ func TestSchemaUpdatesAddFieldSimpleWithFilterOnPopulatedDatabase(t *testing.T) }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/field/with_introspection_test.go 
b/tests/integration/schema/updates/add/field/with_introspection_test.go index 42f1f7aba2..df75ac43c3 100644 --- a/tests/integration/schema/updates/add/field/with_introspection_test.go +++ b/tests/integration/schema/updates/add/field/with_introspection_test.go @@ -65,7 +65,7 @@ func TestSchemaUpdatesAddFieldIntrospection(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddFieldIntrospectionDoesNotAmendGQLTypesGivenBadPatch(t *testing.T) { @@ -113,5 +113,5 @@ func TestSchemaUpdatesAddFieldIntrospectionDoesNotAmendGQLTypesGivenBadPatch(t * }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/add/simple_test.go b/tests/integration/schema/updates/add/simple_test.go index ba304acb43..b8e4ce3a5f 100644 --- a/tests/integration/schema/updates/add/simple_test.go +++ b/tests/integration/schema/updates/add/simple_test.go @@ -45,7 +45,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingSchema(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddSimpleErrorsAddingCollectionProp(t *testing.T) { @@ -69,7 +69,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingCollectionProp(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddSimpleErrorsAddingSchemaProp(t *testing.T) { @@ -93,7 +93,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingSchemaProp(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedCollectionProp(t *testing.T) { @@ -125,7 +125,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedCollectionProp(t *testing. }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedSchemaProp(t *testing.T) { @@ -157,5 +157,5 @@ func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedSchemaProp(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/copy/field/simple_test.go b/tests/integration/schema/updates/copy/field/simple_test.go index 766922f931..ff0680f55d 100644 --- a/tests/integration/schema/updates/copy/field/simple_test.go +++ b/tests/integration/schema/updates/copy/field/simple_test.go @@ -47,7 +47,7 @@ func TestSchemaUpdatesCopyFieldErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) { @@ -85,7 +85,7 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This is an odd test, but still a possibility and we should still cover it. 
@@ -108,7 +108,7 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t * { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "age" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "Integer" } + { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "Int" } ] `, }, @@ -136,7 +136,7 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t * }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } // This is an odd test, but still a possibility and we should still cover it. @@ -166,5 +166,5 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndInvalidKindSubstitut }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/copy/field/with_introspection_test.go b/tests/integration/schema/updates/copy/field/with_introspection_test.go index ac47df6573..566b18db7c 100644 --- a/tests/integration/schema/updates/copy/field/with_introspection_test.go +++ b/tests/integration/schema/updates/copy/field/with_introspection_test.go @@ -77,5 +77,5 @@ func TestSchemaUpdatesCopyFieldIntrospectionWithRemoveIDAndReplaceName(t *testin }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/copy/simple_test.go b/tests/integration/schema/updates/copy/simple_test.go index e1d6ba52ec..5b4c19ed22 100644 --- a/tests/integration/schema/updates/copy/simple_test.go +++ b/tests/integration/schema/updates/copy/simple_test.go @@ -45,5 +45,5 @@ func TestSchemaUpdatesCopyCollectionWithRemoveIDAndReplaceName(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/index/simple_test.go b/tests/integration/schema/updates/index/simple_test.go new file mode 100644 index 0000000000..970ef2bb86 --- /dev/null +++ b/tests/integration/schema/updates/index/simple_test.go @@ -0,0 +1,233 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestPatching_ForCollectionWithIndex_StillWorks(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test patching schema for collection with index still works", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String @index + age: Int @index + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + age + email + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestPatching_IfAttemptToAddIndex_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test adding index to collection via patch fails", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String @index + age: Int + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Indexes/-", "value": { + "Name": "some_index", + "ID": 0, + "Fields": [ + { + "Name": "age", + "Direction": "ASC" + } + ] + } + } + ] + `, + ExpectedError: "adding indexes via patch is not supported. ProposedName: some_index", + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestPatching_IfAttemptToDropIndex_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test dropping index from collection via patch fails", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String @index + age: Int @index(name: "users_age_index") + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "remove", "path": "/Users/Indexes/1" } + ] + `, + ExpectedError: "dropping indexes via patch is not supported. Name: users_age_index", + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestPatching_IfAttemptToChangeIndexName_ReturnError(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test changing index's name via patch fails", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String @index + age: Int + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "replace", "path": "/Users/Indexes/0/Name", "value": "new_index_name" } + ] + `, + ExpectedError: "adding indexes via patch is not supported. 
ProposedName: new_index_name", + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestPatching_IfAttemptToChangeIndexField_ReturnError(t *testing.T) { + testCases := []struct { + description string + patch string + }{ + { + description: "Test adding a field to an index via patch fails", + patch: ` + [ + { "op": "add", "path": "/Users/Indexes/0/Fields/-", "value": { + "Name": "age", + "Direction": "ASC" + } + } + ] + `, + }, + { + description: "Test removing a field from an index via patch fails", + patch: ` + [ + { "op": "remove", "path": "/Users/Indexes/0/Fields/0" } + ] + `, + }, + { + description: "Test changing index's field name via patch fails", + patch: ` + [ + { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Name", "value": "new_field_name" } + ] + `, + }, + { + description: "Test changing index's field direction via patch fails", + patch: ` + [ + { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Direction", "value": "DESC" } + ] + `, + }, + } + + for _, testCase := range testCases { + test := testUtils.TestCase{ + Description: testCase.description, + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String @index + age: Int + } + `, + }, + testUtils.SchemaPatch{ + Patch: testCase.patch, + ExpectedError: "changing indexes via patch is not supported", + }, + testUtils.Request{ + Request: `query { + Users { + name + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) + } +} diff --git a/tests/integration/schema/updates/move/field/simple_test.go b/tests/integration/schema/updates/move/field/simple_test.go index 51feac090f..197b9410b7 100644 --- a/tests/integration/schema/updates/move/field/simple_test.go +++ b/tests/integration/schema/updates/move/field/simple_test.go @@ -38,5 +38,5 @@ func TestSchemaUpdatesMoveFieldErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/move/simple_test.go b/tests/integration/schema/updates/move/simple_test.go index e91a5f930c..60e0611746 100644 --- a/tests/integration/schema/updates/move/simple_test.go +++ b/tests/integration/schema/updates/move/simple_test.go @@ -84,5 +84,5 @@ func TestSchemaUpdatesMoveCollectionDoesNothing(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/remove/fields/simple_test.go b/tests/integration/schema/updates/remove/fields/simple_test.go index 34b9e67890..f4fa6c2482 100644 --- a/tests/integration/schema/updates/remove/fields/simple_test.go +++ b/tests/integration/schema/updates/remove/fields/simple_test.go @@ -38,7 +38,7 @@ func TestSchemaUpdatesRemoveFieldErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveAllFieldsErrors(t *testing.T) { @@ -63,7 +63,7 @@ func TestSchemaUpdatesRemoveAllFieldsErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldNameErrors(t *testing.T) { @@ -88,7 +88,7 @@ func TestSchemaUpdatesRemoveFieldNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldIDErrors(t *testing.T) { 
@@ -113,7 +113,7 @@ func TestSchemaUpdatesRemoveFieldIDErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldKindErrors(t *testing.T) { @@ -138,7 +138,7 @@ func TestSchemaUpdatesRemoveFieldKindErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldTypErrors(t *testing.T) { @@ -163,7 +163,7 @@ func TestSchemaUpdatesRemoveFieldTypErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { @@ -192,7 +192,7 @@ func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { @@ -221,7 +221,7 @@ func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveFieldRelationTypeErrors(t *testing.T) { @@ -250,5 +250,5 @@ func TestSchemaUpdatesRemoveFieldRelationTypeErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Author", "Book"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/remove/simple_test.go b/tests/integration/schema/updates/remove/simple_test.go index 85e4e10dc9..1bbb956f4b 100644 --- a/tests/integration/schema/updates/remove/simple_test.go +++ b/tests/integration/schema/updates/remove/simple_test.go @@ -38,7 +38,7 @@ func TestSchemaUpdatesRemoveCollectionNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveCollectionIDErrors(t *testing.T) { @@ -63,7 +63,7 @@ func TestSchemaUpdatesRemoveCollectionIDErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveSchemaIDErrors(t *testing.T) { @@ -88,7 +88,7 @@ func TestSchemaUpdatesRemoveSchemaIDErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveSchemaVersionIDErrors(t *testing.T) { @@ -122,7 +122,7 @@ func TestSchemaUpdatesRemoveSchemaVersionIDErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { @@ -147,5 +147,5 @@ func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/replace/field/simple_test.go b/tests/integration/schema/updates/replace/field/simple_test.go index 65c7087a55..e56f708f99 100644 --- a/tests/integration/schema/updates/replace/field/simple_test.go +++ b/tests/integration/schema/updates/replace/field/simple_test.go @@ -38,7 +38,7 @@ func TestSchemaUpdatesReplaceFieldErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesReplaceFieldWithIDErrors(t *testing.T) { @@ -63,5 +63,5 @@ func TestSchemaUpdatesReplaceFieldWithIDErrors(t *testing.T) { }, }, } - 
testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/replace/simple_test.go b/tests/integration/schema/updates/replace/simple_test.go index b428a7ecac..600a12b69c 100644 --- a/tests/integration/schema/updates/replace/simple_test.go +++ b/tests/integration/schema/updates/replace/simple_test.go @@ -51,7 +51,7 @@ func TestSchemaUpdatesReplaceCollectionErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } /* WIP @@ -108,6 +108,6 @@ func TestSchemaUpdatesReplaceCollectionNameWithExistingDoesNotChangeVersionID(t }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } */ diff --git a/tests/integration/schema/updates/test/add_field_test.go b/tests/integration/schema/updates/test/add_field_test.go index c1a5709e64..179dddbc43 100644 --- a/tests/integration/schema/updates/test/add_field_test.go +++ b/tests/integration/schema/updates/test/add_field_test.go @@ -46,7 +46,7 @@ func TestSchemaUpdatesTestAddField(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesTestAddFieldBlockedByTest(t *testing.T) { @@ -80,5 +80,5 @@ func TestSchemaUpdatesTestAddFieldBlockedByTest(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/test/field/simple_test.go b/tests/integration/schema/updates/test/field/simple_test.go index c6f0d4783f..38f27f0d0e 100644 --- a/tests/integration/schema/updates/test/field/simple_test.go +++ b/tests/integration/schema/updates/test/field/simple_test.go @@ -37,7 +37,7 @@ func TestSchemaUpdatesTestFieldNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesTestFieldNamePasses(t *testing.T) { @@ -60,7 +60,7 @@ func TestSchemaUpdatesTestFieldNamePasses(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesTestFieldErrors(t *testing.T) { @@ -84,7 +84,7 @@ func TestSchemaUpdatesTestFieldErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesTestFieldPasses(t *testing.T) { @@ -108,5 +108,5 @@ func TestSchemaUpdatesTestFieldPasses(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/schema/updates/test/simple_test.go b/tests/integration/schema/updates/test/simple_test.go index 0e0eb9e06a..e18e008490 100644 --- a/tests/integration/schema/updates/test/simple_test.go +++ b/tests/integration/schema/updates/test/simple_test.go @@ -37,7 +37,7 @@ func TestSchemaUpdatesTestCollectionNameErrors(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaUpdatesTestCollectionNamePasses(t *testing.T) { @@ -60,7 +60,7 @@ func TestSchemaUpdatesTestCollectionNamePasses(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } /* WIP @@ -116,6 +116,6 @@ func TestSchemaUpdatesTestCollectionNameDoesNotChangeVersionID(t *testing.T) { }, }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + 
testUtils.ExecuteTestCase(t, test) } */ diff --git a/tests/integration/schema/with_inline_array_test.go b/tests/integration/schema/with_inline_array_test.go index 7b15639884..9a3e9a9274 100644 --- a/tests/integration/schema/with_inline_array_test.go +++ b/tests/integration/schema/with_inline_array_test.go @@ -43,7 +43,7 @@ func TestSchemaInlineArrayCreatesSchemaGivenSingleType(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users"}, test) + testUtils.ExecuteTestCase(t, test) } func TestSchemaInlineArrayCreatesSchemaGivenSecondType(t *testing.T) { @@ -80,5 +80,5 @@ func TestSchemaInlineArrayCreatesSchemaGivenSecondType(t *testing.T) { }, } - testUtils.ExecuteTestCase(t, []string{"Users", "Books"}, test) + testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/state.go b/tests/integration/state.go new file mode 100644 index 0000000000..f7d4dd45a0 --- /dev/null +++ b/tests/integration/state.go @@ -0,0 +1,106 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "context" + "testing" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/net" +) + +type state struct { + // The test context. + ctx context.Context + + // The Go Test state + t *testing.T + + // The TestCase currently being executed. + testCase TestCase + + // The type of database currently being tested. + dbt DatabaseType + + // Any explicit transactions active in this test. + // + // This is order dependent and the property is accessed by index. + txns []datastore.Txn + + // Will receive an item once all actions have finished processing. + allActionsDone chan struct{} + + // These channels will receive a function which asserts results of any subscription requests. + subscriptionResultsChans []chan func() + + // These synchronisation channels allow async actions to track their completion. + syncChans []chan struct{} + + // The addresses of any nodes configured. + nodeAddresses []string + + // The configurations for any nodes + nodeConfigs []config.Config + + // The nodes active in this test. + nodes []*net.Node + + // The paths to any file-based databases active in this test. + dbPaths []string + + // Collections by index, by nodeID present in the test. + // Indexes matches that of collectionNames. + collections [][]client.Collection + + // The names of the collections active in this test. + // Indexes matches that of collections. + collectionNames []string + + // Documents by index, by collection index. + // + // Each index is assumed to be global, and may be expected across multiple + // nodes. + documents [][]*client.Document + + // Indexes, by index, by collection index, by node index. + indexes [][][]client.IndexDescription +} + +// newState returns a fresh state for the given testCase.
+func newState( + ctx context.Context, + t *testing.T, + testCase TestCase, + dbt DatabaseType, + collectionNames []string, +) *state { + return &state{ + ctx: ctx, + t: t, + testCase: testCase, + dbt: dbt, + txns: []datastore.Txn{}, + allActionsDone: make(chan struct{}), + subscriptionResultsChans: []chan func(){}, + syncChans: []chan struct{}{}, + nodeAddresses: []string{}, + nodeConfigs: []config.Config{}, + nodes: []*net.Node{}, + dbPaths: []string{}, + collections: [][]client.Collection{}, + collectionNames: collectionNames, + documents: [][]*client.Document{}, + indexes: [][][]client.IndexDescription{}, + } +} diff --git a/tests/integration/subscription/utils.go b/tests/integration/subscription/utils.go index 9c662eb3e9..7c8fa13f81 100644 --- a/tests/integration/subscription/utils.go +++ b/tests/integration/subscription/utils.go @@ -19,7 +19,6 @@ import ( func execute(t *testing.T, test testUtils.TestCase) { testUtils.ExecuteTestCase( t, - []string{"User"}, testUtils.TestCase{ Description: test.Description, Actions: append( diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 9a8f2300df..38624d42e8 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -13,6 +13,7 @@ package tests import ( "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" ) @@ -48,6 +49,9 @@ type ConfigureNode func() config.Config type Restart struct{} // SchemaUpdate is an action that will update the database schema. +// +// WARNING: getCollectionNames will not work with schemas ending in `type`, e.g. `user_type` +// and should be updated if such a name is desired. type SchemaUpdate struct { // NodeID may hold the ID (index) of a node to apply this update to. // @@ -151,19 +155,27 @@ type UpdateDoc struct { DontSync bool } -// Request represents a standard Defra (GQL) request. -type Request struct { - // NodeID may hold the ID (index) of a node to execute this request on. +// CreateIndex will attempt to create the given secondary index for the given collection +// using the collection api. +type CreateIndex struct { + // NodeID may hold the ID (index) of a node to create the secondary index on. // - // If a value is not provided the request will be executed against all nodes, - // in which case the expected results must all match across all nodes. + // If a value is not provided the index will be created in all nodes. NodeID immutable.Option[int] - // The request to execute. - Request string + // The collection for which this index should be created. + CollectionID int - // The expected (data) results of the issued request. - Results []map[string]any + // The name of the index to create. If not provided, one will be generated. + IndexName string + + // The name of the field to index. Used only for single field indexes. + FieldName string + + // The names of the fields to index. Used only for composite indexes. + FieldsNames []string + // The directions of the 'FieldsNames' to index. Used only for composite indexes. + Directions []client.IndexDirection // Any error expected from the action. Optional. // @@ -172,18 +184,66 @@ type Request struct { ExpectedError string } -// TransactionRequest2 represents a transactional request. -// -// A new transaction will be created for the first TransactionRequest2 of any given -// TransactionId. 
TransactionRequest2s will be submitted to the database in the order -// in which they are recieved (interleaving amongst other actions if provided), however -// they will not be commited until a TransactionCommit of matching TransactionId is -// provided. -type TransactionRequest2 struct { - // Used to identify the transaction for this to run against. - TransactionID int +// DropIndex will attempt to drop the given secondary index from the given collection +// using the collection api. +type DropIndex struct { + // NodeID may hold the ID (index) of a node to delete the secondary index from. + // + // If a value is not provided the index will be deleted from all nodes. + NodeID immutable.Option[int] + + // The collection from which the index should be deleted. + CollectionID int + + // The index-identifier of the secondary index within the collection. + // This is based on the order in which it was created, not the ordering of + // the indexes within the database. + IndexID int + + // The index name of the secondary index within the collection. + // If it is provided, `IndexID` is ignored. + IndexName string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// GetIndex will attempt to get the given secondary index from the given collection +// using the collection api. +type GetIndexes struct { + // NodeID may hold the ID (index) of a node to create the secondary index on. + // + // If a value is not provided the indexes will be retrieved from the first nodes. + NodeID immutable.Option[int] + + // The collection for which this indexes should be retrieved. + CollectionID int + + // The expected indexes to be returned. + ExpectedIndexes []client.IndexDescription + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// Request represents a standard Defra (GQL) request. +type Request struct { + // NodeID may hold the ID (index) of a node to execute this request on. + // + // If a value is not provided the request will be executed against all nodes, + // in which case the expected results must all match across all nodes. + NodeID immutable.Option[int] + + // Used to identify the transaction for this to run against. Optional. + TransactionID immutable.Option[int] - // The request to run against the transaction. + // The request to execute. Request string // The expected (data) results of the issued request. @@ -213,6 +273,9 @@ type TransactionCommit struct { // The subscription will remain active until shortly after all actions have been processed. // The results of the subscription will then be asserted upon. type SubscriptionRequest struct { + // NodeID is the node ID (index) of the node in which to subscribe to. + NodeID immutable.Option[int] + // The subscription request to submit. Request string @@ -227,6 +290,9 @@ type SubscriptionRequest struct { } type IntrospectionRequest struct { + // NodeID is the node ID (index) of the node in which to introspect. + NodeID immutable.Option[int] + // The introspection request to use when fetching schema state. // // Available properties can be found in the GQL spec: @@ -257,6 +323,9 @@ type IntrospectionRequest struct { // The GraphQL clients usually use this to fetch the schema state with a default introspection // query they provide. 
type ClientIntrospectionRequest struct { + // NodeID is the node ID (index) of the node in which to introspect. + NodeID immutable.Option[int] + // The introspection request to use when fetching schema state. Request string @@ -266,3 +335,43 @@ type ClientIntrospectionRequest struct { // contains this string. ExpectedError string } + +// BackupExport will attempt to export data from the datastore using the db api. +type BackupExport struct { + // NodeID may hold the ID (index) of a node to generate the backup from. + // + // If a value is not provided the indexes will be retrieved from the first nodes. + NodeID immutable.Option[int] + + // The backup configuration. + Config client.BackupConfig + + // Content expected to be found in the backup file. + ExpectedContent string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} + +// BackupExport will attempt to export data from the datastore using the db api. +type BackupImport struct { + // NodeID may hold the ID (index) of a node to generate the backup from. + // + // If a value is not provided the indexes will be retrieved from the first nodes. + NodeID immutable.Option[int] + + // The backup file path. + Filepath string + + // The backup file content. + ImportContent string + + // Any error expected from the action. Optional. + // + // String can be a partial, and the test will pass if an error is returned that + // contains this string. + ExpectedError string +} diff --git a/tests/integration/utils.go b/tests/integration/utils.go index 2b5f1019a6..f15ac8f0ef 100644 --- a/tests/integration/utils.go +++ b/tests/integration/utils.go @@ -14,27 +14,10 @@ import ( "testing" ) -// Represents a request assigned to a particular transaction. -type TransactionRequest struct { - // Used to identify the transaction for this to run against (allows multiple - // requtests to share a single transaction) - TransactionId int - // The request to run against the transaction - Request string - // The expected (data) results of the issued request - Results []map[string]any - // The expected error resulting from the issued request. Also checked against the txn commit. - ExpectedError string -} - type RequestTestCase struct { Description string Request string - // A collection of requests that are tied to a specific transaction. - // These will be executed before `Request` (if specified), in the order that they are listed here. - TransactionalRequests []TransactionRequest - // docs is a map from Collection Index, to a list // of docs in stringified JSON format Docs map[int][]string @@ -88,38 +71,6 @@ func ExecuteRequestTestCase( } } - for _, request := range test.TransactionalRequests { - actions = append( - actions, - TransactionRequest2{ - TransactionID: request.TransactionId, - Request: request.Request, - Results: request.Results, - ExpectedError: request.ExpectedError, - }, - ) - } - - // The old test framework commited all the transactions at the end - // so we can just lump these here, they must however be commited in - // the order in which they were first recieved. - txnIndexesCommited := map[int]struct{}{} - for _, request := range test.TransactionalRequests { - if _, alreadyCommited := txnIndexesCommited[request.TransactionId]; alreadyCommited { - // Only commit each transaction once. 
- continue - } - - txnIndexesCommited[request.TransactionId] = struct{}{} - actions = append( - actions, - TransactionCommit{ - TransactionID: request.TransactionId, - ExpectedError: request.ExpectedError, - }, - ) - } - if test.Request != "" { actions = append( actions, @@ -133,7 +84,6 @@ func ExecuteRequestTestCase( ExecuteTestCase( t, - collectionNames, TestCase{ Description: test.Description, Actions: actions, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 17ae66aa9d..3f66f07d9c 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -26,14 +26,13 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v3" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/node" + "github.com/sourcenetwork/defradb/net" ) const ( @@ -66,6 +65,10 @@ var ( const subscriptionTimeout = 1 * time.Second +// Instantiating lenses is expensive, and our tests do not benefit from a large number of them, +// so we explicitly set it to a low value. +const lensPoolSize = 2 + var databaseDir string var rootDatabaseDir string @@ -165,7 +168,7 @@ func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, err return nil, err } - dbopts = append(dbopts, db.WithUpdateEvents()) + dbopts = append(dbopts, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) db, err := db.NewDB(ctx, rootstore, dbopts...) if err != nil { @@ -177,7 +180,7 @@ func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, err func NewInMemoryDB(ctx context.Context) (client.DB, error) { rootstore := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) if err != nil { return nil, err } @@ -206,7 +209,7 @@ func newBadgerFileDB(ctx context.Context, t testing.TB, path string) (client.DB, return nil, err } - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents()) + db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) if err != nil { return nil, err } @@ -266,9 +269,10 @@ func GetDatabase(ctx context.Context, t *testing.T, dbt DatabaseType) (client.DB // configured to do so (the CI will do so, but disabled by default as it is slow). 
func ExecuteTestCase( t *testing.T, - collectionNames []string, testCase TestCase, ) { + collectionNames := getCollectionNames(testCase) + if DetectDbChanges && DetectDbChangesPreTestChecks(t, collectionNames) { return } @@ -291,122 +295,104 @@ func executeTestCase( testCase TestCase, dbt DatabaseType, ) { - var done bool log.Info(ctx, testCase.Description, logging.NewKV("Database", dbt)) flattenActions(&testCase) startActionIndex, endActionIndex := getActionRange(testCase) - txns := []datastore.Txn{} - allActionsDone := make(chan struct{}) - resultsChans := []chan func(){} - syncChans := []chan struct{}{} - nodeAddresses := []string{} - // The actions responsible for configuring the node - nodeConfigs := []config.Config{} - nodes, dbPaths := getStartingNodes(ctx, t, dbt, collectionNames, testCase) + + s := newState(ctx, t, testCase, dbt, collectionNames) + setStartingNodes(s) + // It is very important that the databases are always closed, otherwise resources will leak // as tests run. This is particularly important for file based datastores. - defer closeNodes(ctx, t, nodes) + defer closeNodes(s) // Documents and Collections may already exist in the database if actions have been split // by the change detector so we should fetch them here at the start too (if they exist). // collections are by node (index), as they are specific to nodes. - collections := getCollections(ctx, t, nodes, collectionNames) - // documents are by collection (index), these are not node specific. - documents := getDocuments(ctx, t, testCase, collections, startActionIndex) + refreshCollections(s) + refreshDocuments(s, startActionIndex) + refreshIndexes(s) for i := startActionIndex; i <= endActionIndex; i++ { - // declare default database for ease of use - var db client.DB - if len(nodes) > 0 { - db = nodes[0].DB - } - switch action := testCase.Actions[i].(type) { case ConfigureNode: - if DetectDbChanges { - // We do not yet support the change detector for tests running across multiple nodes. - t.SkipNow() - return - } - cfg := action() - node, address, path := configureNode(ctx, t, dbt, cfg) - nodes = append(nodes, node) - nodeAddresses = append(nodeAddresses, address) - dbPaths = append(dbPaths, path) - nodeConfigs = append(nodeConfigs, cfg) + configureNode(s, action) case Restart: - // Append the new syncChans on top of the previous - the old syncChans will be closed - // gracefully as part of the node closure. - syncChans = append( - syncChans, - restartNodes(ctx, t, testCase, dbt, nodes, dbPaths, nodeAddresses, nodeConfigs)..., - ) - - // If the db was restarted we need to refresh the collection definitions as the old instances - // will reference the old (closed) database instances. 
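// executeTestCase now threads all per-test data through a single *state value instead of
// a long parameter list. The state type itself is defined elsewhere in this change and is
// not shown in this hunk; the sketch below is inferred from how its fields are used in
// this file, so it is an assumption rather than the actual definition (field names and
// types may differ in the real state file, which also keeps the config import).
type stateSketch struct {
	ctx      context.Context
	t        *testing.T
	testCase TestCase
	dbt      DatabaseType

	// Collection names shared (by index) across all nodes.
	collectionNames []string

	// Per-node runtime data, appended to by setStartingNodes and ConfigureNode actions.
	nodes         []*net.Node
	dbPaths       []string
	nodeAddresses []string
	nodeConfigs   []config.Config

	// Caches refreshed after schema changes and restarts.
	collections [][]client.Collection
	documents   [][]*client.Document
	indexes     [][][]client.IndexDescription

	// Transaction and subscription plumbing.
	txns                     []datastore.Txn
	allActionsDone           chan struct{}
	subscriptionResultsChans []chan func()
}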
- collections = getCollections(ctx, t, nodes, collectionNames) + restartNodes(s, i) case ConnectPeers: - syncChans = append(syncChans, connectPeers(ctx, t, testCase, action, nodes, nodeAddresses)) + connectPeers(s, action) case ConfigureReplicator: - syncChans = append(syncChans, configureReplicator(ctx, t, testCase, action, nodes, nodeAddresses)) + configureReplicator(s, action) case SubscribeToCollection: - subscribeToCollection(ctx, t, testCase, action, nodes, collections) + subscribeToCollection(s, action) case UnsubscribeToCollection: - unsubscribeToCollection(ctx, t, testCase, action, nodes, collections) + unsubscribeToCollection(s, action) case GetAllP2PCollections: - getAllP2PCollections(ctx, t, action, nodes, collections) + getAllP2PCollections(s, action) case SchemaUpdate: - updateSchema(ctx, t, nodes, testCase, action) - // If the schema was updated we need to refresh the collection definitions. - collections = getCollections(ctx, t, nodes, collectionNames) + updateSchema(s, action) case SchemaPatch: - patchSchema(ctx, t, nodes, testCase, action) - // If the schema was updated we need to refresh the collection definitions. - collections = getCollections(ctx, t, nodes, collectionNames) + patchSchema(s, action) + + case ConfigureMigration: + configureMigration(s, action) + + case GetMigrations: + getMigrations(s, action) case CreateDoc: - documents = createDoc(ctx, t, testCase, nodes, collections, documents, action) + createDoc(s, action) case DeleteDoc: - deleteDoc(ctx, t, testCase, nodes, collections, documents, action) + deleteDoc(s, action) case UpdateDoc: - updateDoc(ctx, t, testCase, nodes, collections, documents, action) + updateDoc(s, action) + + case CreateIndex: + createIndex(s, action) + + case DropIndex: + dropIndex(s, action) + + case GetIndexes: + getIndexes(s, action) - case TransactionRequest2: - txns = executeTransactionRequest(ctx, t, db, txns, testCase, action) + case BackupExport: + backupExport(s, action) + + case BackupImport: + backupImport(s, action) case TransactionCommit: - commitTransaction(ctx, t, txns, testCase, action) + commitTransaction(s, action) case SubscriptionRequest: - var resultsChan chan func() - resultsChan, done = executeSubscriptionRequest(ctx, t, allActionsDone, db, testCase, action) - if done { - return - } - resultsChans = append(resultsChans, resultsChan) + executeSubscriptionRequest(s, action) case Request: - executeRequest(ctx, t, nodes, testCase, action) + executeRequest(s, action) + + case ExplainRequest: + executeExplainRequest(s, action) case IntrospectionRequest: - assertIntrospectionResults(ctx, t, testCase.Description, db, action) + assertIntrospectionResults(s, action) case ClientIntrospectionRequest: - assertClientIntrospectionResults(ctx, t, testCase.Description, db, action) + assertClientIntrospectionResults(s, action) case WaitForSync: - waitForSync(t, testCase, action, syncChans) + waitForSync(s, action) case SetupComplete: // no-op, just continue. @@ -417,9 +403,9 @@ func executeTestCase( } // Notify any active subscriptions that all requests have been sent. 
- close(allActionsDone) + close(s.allActionsDone) - for _, resultsChan := range resultsChans { + for _, resultsChan := range s.subscriptionResultsChans { select { case subscriptionAssert := <-resultsChan: // We want to assert back in the main thread so failures get recorded properly @@ -432,30 +418,82 @@ func executeTestCase( } } +// getCollectionNames gets an ordered, unique set of collection names across all nodes +// from the action set within the given test case. +// +// It preserves the order in which they are declared, and shares indexes across all nodes, so +// if a second node adds a collection of a name that was previously declared in another node +// the new node will respect the index originally assigned. This allows collections to be +// referenced across multiple nodes by a consistent, predictable index - allowing a single +// action to target the same collection across multiple nodes. +// +// WARNING: This will not work with schemas ending in `type`, e.g. `user_type` +func getCollectionNames(testCase TestCase) []string { + nextIndex := 0 + collectionIndexByName := map[string]int{} + + for _, a := range testCase.Actions { + switch action := a.(type) { + case SchemaUpdate: + if action.ExpectedError != "" { + // If an error is expected then no collections should result from this action + continue + } + + // WARNING: This will not work with schemas ending in `type`, e.g. `user_type` + splitByType := strings.Split(action.Schema, "type ") + // Skip the first, as that preceeds `type ` if `type ` is present, + // else there are no types. + for i := 1; i < len(splitByType); i++ { + wipSplit := strings.TrimLeft(splitByType[i], " ") + indexOfLastChar := strings.IndexAny(wipSplit, " {") + if indexOfLastChar <= 0 { + // This should never happen + continue + } + + collectionName := wipSplit[:indexOfLastChar] + if _, ok := collectionIndexByName[collectionName]; ok { + // Collection name has already been added, possibly via another node + continue + } + + collectionIndexByName[collectionName] = nextIndex + nextIndex++ + } + } + } + + collectionNames := make([]string, len(collectionIndexByName)) + for name, index := range collectionIndexByName { + collectionNames[index] = name + } + + return collectionNames +} + // closeNodes closes all the given nodes, ensuring that resources are properly released. func closeNodes( - ctx context.Context, - t *testing.T, - nodes []*node.Node, + s *state, ) { - for _, node := range nodes { + for _, node := range s.nodes { if node.Peer != nil { err := node.Close() - require.NoError(t, err) + require.NoError(s.t, err) } - node.DB.Close(ctx) + node.DB.Close(s.ctx) } } // getNodes gets the set of applicable nodes for the given nodeID. // // If nodeID has a value it will return that node only, otherwise all nodes will be returned. -func getNodes(nodeID immutable.Option[int], nodes []*node.Node) []*node.Node { +func getNodes(nodeID immutable.Option[int], nodes []*net.Node) []*net.Node { if !nodeID.HasValue() { return nodes } - return []*node.Node{nodes[nodeID.Value()]} + return []*net.Node{nodes[nodeID.Value()]} } // getNodeCollections gets the set of applicable collections for the given nodeID. @@ -557,25 +595,24 @@ ActionLoop: } else if firstNonSetupIndex > -1 { // We must not set this to -1 :) startIndex = firstNonSetupIndex + } else { + // if we don't have any non-mutation actions, just use the last action + startIndex = endIndex } } return startIndex, endIndex } -// getStartingNodes returns a set of initial Defra nodes for the test to execute against. 
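// getCollectionNames derives the collection list purely by scanning SchemaUpdate actions
// for `type ` declarations, so collection indexes stay stable across all nodes. A small
// illustration of the parsing; the schema text is an illustrative assumption:
func exampleGetCollectionNames() {
	testCase := TestCase{
		Actions: []any{
			SchemaUpdate{Schema: `
				type Users {
					name: String
				}
				type Books {
					title: String
				}
			`},
		},
	}

	names := getCollectionNames(testCase)
	// names is now []string{"Users", "Books"}: "Users" gets index 0 and "Books" index 1,
	// and any later SchemaUpdate (even on another node) declaring the same names reuses
	// those indexes. Names ending in "type" (e.g. "user_type") are not supported by this
	// string-splitting approach, as the WARNING above notes.
	_ = names
}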
+// setStartingNodes adds a set of initial Defra nodes for the test to execute against. // -// If a node(s) has been explicitly configured via a `ConfigureNode` action then an empty -// set will be returned. -func getStartingNodes( - ctx context.Context, - t *testing.T, - dbt DatabaseType, - collectionNames []string, - testCase TestCase, -) ([]*node.Node, []string) { +// If a node(s) has been explicitly configured via a `ConfigureNode` action then no new +// nodes will be added. +func setStartingNodes( + s *state, +) { hasExplicitNode := false - for _, action := range testCase.Actions { + for _, action := range s.testCase.Actions { switch action.(type) { case ConfigureNode: hasExplicitNode = true @@ -584,123 +621,125 @@ func getStartingNodes( // If nodes have not been explicitly configured via actions, setup a default one. if !hasExplicitNode { - db, path, err := GetDatabase(ctx, t, dbt) - require.Nil(t, err) + db, path, err := GetDatabase(s.ctx, s.t, s.dbt) + require.Nil(s.t, err) - return []*node.Node{ - { - DB: db, - }, - }, []string{ - path, - } + s.nodes = append(s.nodes, &net.Node{ + DB: db, + }) + s.dbPaths = append(s.dbPaths, path) } - - return []*node.Node{}, []string{} } func restartNodes( - ctx context.Context, - t *testing.T, - testCase TestCase, - dbt DatabaseType, - nodes []*node.Node, - dbPaths []string, - nodeAddresses []string, - configureActions []config.Config, -) []chan struct{} { - if dbt == badgerIMType || dbt == defraIMType { - return nil + s *state, + actionIndex int, +) { + if s.dbt == badgerIMType || s.dbt == defraIMType { + return } - closeNodes(ctx, t, nodes) + closeNodes(s) // We need to restart the nodes in reverse order, to avoid dial backoff issues. - for i := len(nodes) - 1; i >= 0; i-- { + for i := len(s.nodes) - 1; i >= 0; i-- { originalPath := databaseDir - databaseDir = dbPaths[i] - db, _, err := GetDatabase(ctx, t, dbt) - require.Nil(t, err) + databaseDir = s.dbPaths[i] + db, _, err := GetDatabase(s.ctx, s.t, s.dbt) + require.Nil(s.t, err) databaseDir = originalPath - if len(configureActions) == 0 { + if len(s.nodeConfigs) == 0 { // If there are no explicit node configuration actions the node will be // basic (i.e. no P2P stuff) and can be yielded now. - nodes[i] = &node.Node{ + s.nodes[i] = &net.Node{ DB: db, } continue } - cfg := configureActions[i] + cfg := s.nodeConfigs[i] // We need to make sure the node is configured with its old address, otherwise // a new one may be selected and reconnnection to it will fail. - cfg.Net.P2PAddress = strings.Split(nodeAddresses[i], "/p2p/")[0] - var n *node.Node - n, err = node.NewNode( - ctx, + cfg.Net.P2PAddress = strings.Split(s.nodeAddresses[i], "/p2p/")[0] + var n *net.Node + n, err = net.NewNode( + s.ctx, db, - cfg.NodeConfig(), + net.WithConfig(&cfg), ) - require.NoError(t, err) + require.NoError(s.t, err) if err := n.Start(); err != nil { closeErr := n.Close() if closeErr != nil { - t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) + s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) } - require.NoError(t, err) + require.NoError(s.t, err) } - nodes[i] = n + s.nodes[i] = n + } + + // The index of the action after the last wait action before the current restart action. + // We wish to resume the wait clock from this point onwards. 
+ waitGroupStartIndex := 0 +actionLoop: + for i := actionIndex; i >= 0; i-- { + switch s.testCase.Actions[i].(type) { + case WaitForSync: + // +1 as we do not wish to resume from the wait itself, but the next action + // following it. This may be the current restart action. + waitGroupStartIndex = i + 1 + break actionLoop + } } - syncChans := []chan struct{}{} - for _, tc := range testCase.Actions { + for _, tc := range s.testCase.Actions { switch action := tc.(type) { case ConnectPeers: - syncChans = append(syncChans, setupPeerWaitSync( - ctx, t, testCase, action, nodes[action.SourceNodeID], nodes[action.TargetNodeID], - )) + // Give the nodes a chance to connect to each other and learn about each other's subscribed topics. + time.Sleep(100 * time.Millisecond) + setupPeerWaitSync( + s, waitGroupStartIndex, action, s.nodes[action.SourceNodeID], s.nodes[action.TargetNodeID], + ) case ConfigureReplicator: - syncChans = append(syncChans, setupRepicatorWaitSync( - ctx, t, testCase, action, nodes[action.SourceNodeID], nodes[action.TargetNodeID], - )) + // Give the nodes a chance to connect to each other and learn about each other's subscribed topics. + time.Sleep(100 * time.Millisecond) + setupReplicatorWaitSync( + s, waitGroupStartIndex, action, s.nodes[action.SourceNodeID], s.nodes[action.TargetNodeID], + ) } } - // Give the nodes a chance to connect to each other and learn about each other's subscrivbed topics. - time.Sleep(100 * time.Millisecond) - - return syncChans + // If the db was restarted we need to refresh the collection definitions as the old instances + // will reference the old (closed) database instances. + refreshCollections(s) + refreshIndexes(s) } -// getCollections returns all the collections of the given names, preserving order. +// refreshCollections refreshes all the collections of the given names, preserving order. // // If a given collection is not present in the database the value at the corresponding // result-index will be nil. -func getCollections( - ctx context.Context, - t *testing.T, - nodes []*node.Node, - collectionNames []string, -) [][]client.Collection { - collections := make([][]client.Collection, len(nodes)) +func refreshCollections( + s *state, +) { + s.collections = make([][]client.Collection, len(s.nodes)) - for nodeID, node := range nodes { - collections[nodeID] = make([]client.Collection, len(collectionNames)) - allCollections, err := node.DB.GetAllCollections(ctx) - require.Nil(t, err) + for nodeID, node := range s.nodes { + s.collections[nodeID] = make([]client.Collection, len(s.collectionNames)) + allCollections, err := node.DB.GetAllCollections(s.ctx) + require.Nil(s.t, err) - for i, collectionName := range collectionNames { + for i, collectionName := range s.collectionNames { for _, collection := range allCollections { if collection.Name() == collectionName { - collections[nodeID][i] = collection + s.collections[nodeID][i] = collection break } } } } - return collections } // configureNode configures and starts a new Defra node using the provided configuration. @@ -708,66 +747,71 @@ func getCollections( // It returns the new node, and its peer address. Any errors generated during configuration // will result in a test failure. func configureNode( - ctx context.Context, - t *testing.T, - dbt DatabaseType, - cfg config.Config, -) (*node.Node, string, string) { + s *state, + action ConfigureNode, +) { + if DetectDbChanges { + // We do not yet support the change detector for tests running across multiple nodes. 
+ s.t.SkipNow() + return + } + + cfg := action() // WARNING: This is a horrible hack both deduplicates/randomizes peer IDs // And affects where libp2p(?) stores some values on the file system, even when using // an in memory store. - cfg.Datastore.Badger.Path = t.TempDir() + cfg.Datastore.Badger.Path = s.t.TempDir() - db, path, err := GetDatabase(ctx, t, dbt) //disable change dector, or allow it? - require.NoError(t, err) + db, path, err := GetDatabase(s.ctx, s.t, s.dbt) //disable change dector, or allow it? + require.NoError(s.t, err) - var n *node.Node - log.Info(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = node.NewNode( - ctx, + var n *net.Node + log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) + n, err = net.NewNode( + s.ctx, db, - cfg.NodeConfig(), + net.WithConfig(&cfg), ) - require.NoError(t, err) + require.NoError(s.t, err) if err := n.Start(); err != nil { closeErr := n.Close() if closeErr != nil { - t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) + s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) } - require.NoError(t, err) + require.NoError(s.t, err) } address := fmt.Sprintf("%s/p2p/%s", n.ListenAddrs()[0].String(), n.PeerID()) + s.nodeAddresses = append(s.nodeAddresses, address) + s.nodeConfigs = append(s.nodeConfigs, cfg) - return n, address, path + s.nodes = append(s.nodes, n) + s.dbPaths = append(s.dbPaths, path) } -func getDocuments( - ctx context.Context, - t *testing.T, - testCase TestCase, - collections [][]client.Collection, +func refreshDocuments( + s *state, startActionIndex int, -) [][]*client.Document { - if len(collections) == 0 { +) { + if len(s.collections) == 0 { // This should only be possible at the moment for P2P testing, for which the // change detector is currently disabled. We'll likely need some fancier logic // here if/when we wish to enable it. - return [][]*client.Document{} + return } // For now just do the initial setup using the collections on the first node, // this may need to become more involved at a later date depending on testing // requirements. - documentsByCollection := make([][]*client.Document, len(collections[0])) + s.documents = make([][]*client.Document, len(s.collections[0])) - for i := range collections[0] { - documentsByCollection[i] = []*client.Document{} + for i := range s.collections[0] { + s.documents[i] = []*client.Document{} } for i := 0; i < startActionIndex; i++ { - switch action := testCase.Actions[i].(type) { + switch action := s.testCase.Actions[i].(type) { case CreateDoc: // We need to add the existing documents in the order in which the test case lists them // otherwise they cannot be referenced correctly by other actions. @@ -780,157 +824,390 @@ func getDocuments( // Just use the collection from the first relevant node, as all will be the same for this // purpose. - collection := getNodeCollections(action.NodeID, collections)[0][action.CollectionID] + collection := getNodeCollections(action.NodeID, s.collections)[0][action.CollectionID] // The document may have been mutated by other actions, so to be sure we have the latest // version without having to worry about the individual update mechanics we fetch it. 
- doc, err = collection.Get(ctx, doc.Key(), false) + doc, err = collection.Get(s.ctx, doc.Key(), false) if err != nil { // If an err has been returned, ignore it - it may be expected and if not // the test will fail later anyway continue } - documentsByCollection[action.CollectionID] = append(documentsByCollection[action.CollectionID], doc) + s.documents[action.CollectionID] = append(s.documents[action.CollectionID], doc) } } +} + +func refreshIndexes( + s *state, +) { + if len(s.collections) == 0 { + return + } - return documentsByCollection + s.indexes = make([][][]client.IndexDescription, len(s.collections)) + + for i, nodeCols := range s.collections { + s.indexes[i] = make([][]client.IndexDescription, len(nodeCols)) + + for j, col := range nodeCols { + if col == nil { + continue + } + colIndexes, err := col.GetIndexes(s.ctx) + if err != nil { + continue + } + + s.indexes[i][j] = colIndexes + } + } +} + +func getIndexes( + s *state, + action GetIndexes, +) { + if len(s.collections) == 0 { + return + } + + var expectedErrorRaised bool + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { + err := withRetry( + actionNodes, + nodeID, + func() error { + actualIndexes, err := collections[action.CollectionID].GetIndexes(s.ctx) + if err != nil { + return err + } + + assertIndexesListsEqual(action.ExpectedIndexes, + actualIndexes, s.t, s.testCase.Description) + + return nil + }, + ) + expectedErrorRaised = expectedErrorRaised || + AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + } + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) +} + +func assertIndexesListsEqual( + expectedIndexes []client.IndexDescription, + actualIndexes []client.IndexDescription, + t *testing.T, + testDescription string, +) { + toNames := func(indexes []client.IndexDescription) []string { + names := make([]string, len(indexes)) + for i, index := range indexes { + names[i] = index.Name + } + return names + } + + require.ElementsMatch(t, toNames(expectedIndexes), toNames(actualIndexes), testDescription) + + toMap := func(indexes []client.IndexDescription) map[string]client.IndexDescription { + resultMap := map[string]client.IndexDescription{} + for _, index := range indexes { + resultMap[index.Name] = index + } + return resultMap + } + + expectedMap := toMap(expectedIndexes) + actualMap := toMap(actualIndexes) + for key := range expectedMap { + assertIndexesEqual(expectedMap[key], actualMap[key], t, testDescription) + } +} + +func assertIndexesEqual(expectedIndex, actualIndex client.IndexDescription, + t *testing.T, + testDescription string, +) { + assert.Equal(t, expectedIndex.Name, actualIndex.Name, testDescription) + assert.Equal(t, expectedIndex.ID, actualIndex.ID, testDescription) + + toNames := func(fields []client.IndexedFieldDescription) []string { + names := make([]string, len(fields)) + for i, field := range fields { + names[i] = field.Name + } + return names + } + + require.ElementsMatch(t, toNames(expectedIndex.Fields), toNames(actualIndex.Fields), testDescription) + + toMap := func(fields []client.IndexedFieldDescription) map[string]client.IndexedFieldDescription { + resultMap := map[string]client.IndexedFieldDescription{} + for _, field := range fields { + resultMap[field.Name] = field + } + return resultMap + } + + expectedMap := toMap(expectedIndex.Fields) + actualMap := toMap(actualIndex.Fields) + for key := range expectedMap { + assert.Equal(t, 
expectedMap[key], actualMap[key], testDescription) + } } // updateSchema updates the schema using the given details. func updateSchema( - ctx context.Context, - t *testing.T, - nodes []*node.Node, - testCase TestCase, + s *state, action SchemaUpdate, ) { - for _, node := range getNodes(action.NodeID, nodes) { - _, err := node.DB.AddSchema(ctx, action.Schema) - expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + for _, node := range getNodes(action.NodeID, s.nodes) { + _, err := node.DB.AddSchema(s.ctx, action.Schema) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } + + // If the schema was updated we need to refresh the collection definitions. + refreshCollections(s) + refreshIndexes(s) } func patchSchema( - ctx context.Context, - t *testing.T, - nodes []*node.Node, - testCase TestCase, + s *state, action SchemaPatch, ) { - for _, node := range getNodes(action.NodeID, nodes) { - err := node.DB.PatchSchema(ctx, action.Patch) - expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + for _, node := range getNodes(action.NodeID, s.nodes) { + err := node.DB.PatchSchema(s.ctx, action.Patch) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } + + // If the schema was updated we need to refresh the collection definitions. + refreshCollections(s) + refreshIndexes(s) } // createDoc creates a document using the collection api and caches it in the // given documents slice. func createDoc( - ctx context.Context, - t *testing.T, - testCase TestCase, - nodes []*node.Node, - nodeCollections [][]client.Collection, - documents [][]*client.Document, + s *state, action CreateDoc, -) [][]*client.Document { +) { // All the docs should be identical, and we only need 1 copy so taking the last // is okay. 
var doc *client.Document - actionNodes := getNodes(action.NodeID, nodes) - for nodeID, collections := range getNodeCollections(action.NodeID, nodeCollections) { + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { var err error doc, err = client.NewDocFromJSON([]byte(action.Doc)) - if AssertError(t, testCase.Description, err, action.ExpectedError) { - return nil + if AssertError(s.t, s.testCase.Description, err, action.ExpectedError) { + return } err = withRetry( actionNodes, nodeID, - func() error { return collections[action.CollectionID].Save(ctx, doc) }, + func() error { return collections[action.CollectionID].Save(s.ctx, doc) }, ) - if AssertError(t, testCase.Description, err, action.ExpectedError) { - return nil + if AssertError(s.t, s.testCase.Description, err, action.ExpectedError) { + return } } - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, false) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, false) - if action.CollectionID >= len(documents) { + if action.CollectionID >= len(s.documents) { // Expand the slice if required, so that the document can be accessed by collection index - documents = append(documents, make([][]*client.Document, action.CollectionID-len(documents)+1)...) + s.documents = append(s.documents, make([][]*client.Document, action.CollectionID-len(s.documents)+1)...) } - documents[action.CollectionID] = append(documents[action.CollectionID], doc) - - return documents + s.documents[action.CollectionID] = append(s.documents[action.CollectionID], doc) } // deleteDoc deletes a document using the collection api and caches it in the // given documents slice. func deleteDoc( - ctx context.Context, - t *testing.T, - testCase TestCase, - nodes []*node.Node, - nodeCollections [][]client.Collection, - documents [][]*client.Document, + s *state, action DeleteDoc, ) { - doc := documents[action.CollectionID][action.DocID] + doc := s.documents[action.CollectionID][action.DocID] var expectedErrorRaised bool - actionNodes := getNodes(action.NodeID, nodes) - for nodeID, collections := range getNodeCollections(action.NodeID, nodeCollections) { + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { err := withRetry( actionNodes, nodeID, func() error { - _, err := collections[action.CollectionID].DeleteWithKey(ctx, doc.Key()) + _, err := collections[action.CollectionID].DeleteWithKey(s.ctx, doc.Key()) return err }, ) - expectedErrorRaised = AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) } - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } // updateDoc updates a document using the collection api. 
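// The createDoc and deleteDoc helpers above (and the updateDoc helper that follows)
// drive the corresponding actions, resolving documents by collection index and creation
// order. A short sketch of how a test exercises them; the schema, field values and error
// fragment are illustrative assumptions only:
func TestDocumentMutationSketch(t *testing.T) {
	ExecuteTestCase(t, TestCase{
		Actions: []any{
			SchemaUpdate{Schema: `type Users { name: String }`},
			CreateDoc{CollectionID: 0, Doc: `{"name": "John"}`},
			// DocID 0 refers to the first document created in collection 0.
			UpdateDoc{CollectionID: 0, DocID: 0, Doc: `{"name": "Johnny"}`},
			DeleteDoc{CollectionID: 0, DocID: 0},
			// ExpectedError matches on substrings, so a partial message is enough; the
			// exact wording here is assumed for illustration.
			DeleteDoc{CollectionID: 0, DocID: 0, ExpectedError: "document not found"},
		},
	})
}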
func updateDoc( - ctx context.Context, - t *testing.T, - testCase TestCase, - nodes []*node.Node, - nodeCollections [][]client.Collection, - documents [][]*client.Document, + s *state, action UpdateDoc, ) { - doc := documents[action.CollectionID][action.DocID] + doc := s.documents[action.CollectionID][action.DocID] err := doc.SetWithJSON([]byte(action.Doc)) - if AssertError(t, testCase.Description, err, action.ExpectedError) { + if AssertError(s.t, s.testCase.Description, err, action.ExpectedError) { return } var expectedErrorRaised bool - actionNodes := getNodes(action.NodeID, nodes) - for nodeID, collections := range getNodeCollections(action.NodeID, nodeCollections) { + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { err := withRetry( actionNodes, nodeID, - func() error { return collections[action.CollectionID].Save(ctx, doc) }, + func() error { return collections[action.CollectionID].Save(s.ctx, doc) }, ) - expectedErrorRaised = AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) } - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) +} + +// createIndex creates a secondary index using the collection api. +func createIndex( + s *state, + action CreateIndex, +) { + if action.CollectionID >= len(s.indexes) { + // Expand the slice if required, so that the index can be accessed by collection index + s.indexes = append(s.indexes, + make([][][]client.IndexDescription, action.CollectionID-len(s.indexes)+1)...) + } + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { + indexDesc := client.IndexDescription{ + Name: action.IndexName, + } + if action.FieldName != "" { + indexDesc.Fields = []client.IndexedFieldDescription{ + { + Name: action.FieldName, + }, + } + } else if len(action.FieldsNames) > 0 { + for i := range action.FieldsNames { + indexDesc.Fields = append(indexDesc.Fields, client.IndexedFieldDescription{ + Name: action.FieldsNames[i], + Direction: action.Directions[i], + }) + } + } + err := withRetry( + actionNodes, + nodeID, + func() error { + desc, err := collections[action.CollectionID].CreateIndex(s.ctx, indexDesc) + if err != nil { + return err + } + s.indexes[nodeID][action.CollectionID] = + append(s.indexes[nodeID][action.CollectionID], desc) + return nil + }, + ) + if AssertError(s.t, s.testCase.Description, err, action.ExpectedError) { + return + } + } + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, false) +} + +// dropIndex drops the secondary index using the collection api. 
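// createIndex above (and the dropIndex and getIndexes helpers that follow) back the
// CreateIndex, DropIndex and GetIndexes actions. A sketch of the round trip; the schema
// and index name are illustrative assumptions:
func TestSecondaryIndexSketch(t *testing.T) {
	ExecuteTestCase(t, TestCase{
		Actions: []any{
			SchemaUpdate{Schema: `type Users { name: String }`},
			// Create a single-field index on Users.name on every node.
			CreateIndex{CollectionID: 0, IndexName: "users_name_idx", FieldName: "name"},
			// Drop it again, either by name or by IndexID (creation order); a GetIndexes
			// action in between could assert the expected client.IndexDescription values.
			DropIndex{CollectionID: 0, IndexName: "users_name_idx"},
		},
	})
}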
+func dropIndex( + s *state, + action DropIndex, +) { + var expectedErrorRaised bool + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, collections := range getNodeCollections(action.NodeID, s.collections) { + indexName := action.IndexName + if indexName == "" { + indexName = s.indexes[nodeID][action.CollectionID][action.IndexID].Name + } + + err := withRetry( + actionNodes, + nodeID, + func() error { + return collections[action.CollectionID].DropIndex(s.ctx, indexName) + }, + ) + expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + } + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) +} + +// backupExport generates a backup using the db api. +func backupExport( + s *state, + action BackupExport, +) { + if action.Config.Filepath == "" { + action.Config.Filepath = s.t.TempDir() + "/test.json" + } + + var expectedErrorRaised bool + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, node := range actionNodes { + err := withRetry( + actionNodes, + nodeID, + func() error { return node.DB.BasicExport(s.ctx, &action.Config) }, + ) + expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + + if !expectedErrorRaised { + assertBackupContent(s.t, action.ExpectedContent, action.Config.Filepath) + } + } + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) +} + +// backupImport imports data from a backup using the db api. +func backupImport( + s *state, + action BackupImport, +) { + if action.Filepath == "" { + action.Filepath = s.t.TempDir() + "/test.json" + } + + // we can avoid checking the error here as this would mean the filepath is invalid + // and we want to make sure that `BasicImport` fails in this case. + _ = os.WriteFile(action.Filepath, []byte(action.ImportContent), 0664) + + var expectedErrorRaised bool + actionNodes := getNodes(action.NodeID, s.nodes) + for nodeID, node := range actionNodes { + err := withRetry( + actionNodes, + nodeID, + func() error { return node.DB.BasicImport(s.ctx, action.Filepath) }, + ) + expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + } + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } // withRetry attempts to perform the given action, retrying up to a DB-defined @@ -941,7 +1218,7 @@ func updateDoc( // about this in our tests so we just retry a few times until it works (or the // retry limit is breached - important incase this is a different error) func withRetry( - nodes []*node.Node, + nodes []*net.Node, nodeID int, action func() error, ) error { @@ -956,58 +1233,35 @@ func withRetry( return nil } -// executeTransactionRequest executes the given transactional request. -// -// It will create and cache a new transaction if it is the first of the given -// TransactionId. If an error is returned the transaction will be discarded before -// this function returns. 
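// backupExport and backupImport above drive the BackupExport and BackupImport actions;
// when no Filepath is supplied they fall back to a file in the test's temporary
// directory. A sketch of a round trip, written as if it lived in the tests/integration
// package; the schema, document and backup JSON shape are illustrative assumptions:
func TestBackupRoundTripSketch(t *testing.T) {
	ExecuteTestCase(t, TestCase{
		Actions: []any{
			SchemaUpdate{Schema: `type Users { name: String }`},
			CreateDoc{CollectionID: 0, Doc: `{"name": "John"}`},
			// Export the datastore; ExpectedContent (omitted here) can assert the exact
			// bytes written to Config.Filepath.
			BackupExport{Config: client.BackupConfig{}},
			// Write ImportContent to a temp file and import it via BasicImport. The JSON
			// shape below, keyed by collection name, is assumed for illustration.
			BackupImport{ImportContent: `{"Users":[{"name":"Bob"}]}`},
		},
	})
}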
-func executeTransactionRequest( - ctx context.Context, - t *testing.T, +func getStore( + s *state, db client.DB, - txns []datastore.Txn, - testCase TestCase, - action TransactionRequest2, -) []datastore.Txn { - if action.TransactionID >= len(txns) { + transactionSpecifier immutable.Option[int], + expectedError string, +) client.Store { + if !transactionSpecifier.HasValue() { + return db + } + + transactionID := transactionSpecifier.Value() + + if transactionID >= len(s.txns) { // Extend the txn slice so this txn can fit and be accessed by TransactionId - txns = append(txns, make([]datastore.Txn, action.TransactionID-len(txns)+1)...) + s.txns = append(s.txns, make([]datastore.Txn, transactionID-len(s.txns)+1)...) } - if txns[action.TransactionID] == nil { + if s.txns[transactionID] == nil { // Create a new transaction if one does not already exist. - txn, err := db.NewTxn(ctx, false) - if AssertError(t, testCase.Description, err, action.ExpectedError) { - txn.Discard(ctx) + txn, err := db.NewTxn(s.ctx, false) + if AssertError(s.t, s.testCase.Description, err, expectedError) { + txn.Discard(s.ctx) return nil } - txns[action.TransactionID] = txn + s.txns[transactionID] = txn } - result := db.WithTxn(txns[action.TransactionID]).ExecRequest(ctx, action.Request) - expectedErrorRaised := assertRequestResults( - ctx, - t, - testCase.Description, - &result.GQL, - action.Results, - action.ExpectedError, - // anyof is not yet supported by transactional requests - 0, - map[docFieldKey][]any{}, - ) - - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) - - if expectedErrorRaised { - // Make sure to discard the transaction before exit, else an unwanted error - // may surface later (e.g. on database close). - txns[action.TransactionID].Discard(ctx) - return nil - } - - return txns + return db.WithTxn(s.txns[transactionID]) } // commitTransaction commits the given transaction. @@ -1015,39 +1269,34 @@ func executeTransactionRequest( // Will panic if the given transaction does not exist. Discards the transaction if // an error is returned on commit. func commitTransaction( - ctx context.Context, - t *testing.T, - txns []datastore.Txn, - testCase TestCase, + s *state, action TransactionCommit, ) { - err := txns[action.TransactionID].Commit(ctx) + err := s.txns[action.TransactionID].Commit(s.ctx) if err != nil { - txns[action.TransactionID].Discard(ctx) + s.txns[action.TransactionID].Discard(s.ctx) } - expectedErrorRaised := AssertError(t, testCase.Description, err, action.ExpectedError) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } // executeRequest executes the given request. 
func executeRequest( - ctx context.Context, - t *testing.T, - nodes []*node.Node, - testCase TestCase, + s *state, action Request, ) { var expectedErrorRaised bool - for nodeID, node := range getNodes(action.NodeID, nodes) { - result := node.DB.ExecRequest(ctx, action.Request) + for nodeID, node := range getNodes(action.NodeID, s.nodes) { + db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + result := db.ExecRequest(s.ctx, action.Request) anyOfByFieldKey := map[docFieldKey][]any{} expectedErrorRaised = assertRequestResults( - ctx, - t, - testCase.Description, + s.ctx, + s.t, + s.testCase.Description, &result.GQL, action.Results, action.ExpectedError, @@ -1056,7 +1305,7 @@ func executeRequest( ) } - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } // executeSubscriptionRequest executes the given subscription request, returning @@ -1068,73 +1317,71 @@ func executeRequest( // failures are recorded properly. It will only yield once, once // the subscription has terminated. func executeSubscriptionRequest( - ctx context.Context, - t *testing.T, - allActionsDone chan struct{}, - db client.DB, - testCase TestCase, + s *state, action SubscriptionRequest, -) (chan func(), bool) { +) { subscriptionAssert := make(chan func()) - result := db.ExecRequest(ctx, action.Request) - if AssertErrors(t, testCase.Description, result.GQL.Errors, action.ExpectedError) { - return nil, true - } - - go func() { - data := []map[string]any{} - errs := []error{} - - allActionsAreDone := false - expectedDataRecieved := len(action.Results) == 0 - stream := result.Pub.Stream() - for { - select { - case s := <-stream: - sResult, _ := s.(client.GQLResult) - sData, _ := sResult.Data.([]map[string]any) - errs = append(errs, sResult.Errors...) - data = append(data, sData...) - - if len(data) >= len(action.Results) { - expectedDataRecieved = true - } - - case <-allActionsDone: - allActionsAreDone = true - } + for _, node := range getNodes(action.NodeID, s.nodes) { + result := node.DB.ExecRequest(s.ctx, action.Request) + if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { + return + } - if expectedDataRecieved && allActionsAreDone { - finalResult := &client.GQLResult{ - Data: data, - Errors: errs, + go func() { + data := []map[string]any{} + errs := []error{} + + allActionsAreDone := false + expectedDataRecieved := len(action.Results) == 0 + stream := result.Pub.Stream() + for { + select { + case s := <-stream: + sResult, _ := s.(client.GQLResult) + sData, _ := sResult.Data.([]map[string]any) + errs = append(errs, sResult.Errors...) + data = append(data, sData...) + + if len(data) >= len(action.Results) { + expectedDataRecieved = true + } + + case <-s.allActionsDone: + allActionsAreDone = true } - subscriptionAssert <- func() { - // This assert should be executed from the main test routine - // so that failures will be properly handled. 
- expectedErrorRaised := assertRequestResults( - ctx, - t, - testCase.Description, - finalResult, - action.Results, - action.ExpectedError, - // anyof is not yet supported by subscription requests - 0, - map[docFieldKey][]any{}, - ) - - assertExpectedErrorRaised(t, testCase.Description, action.ExpectedError, expectedErrorRaised) + if expectedDataRecieved && allActionsAreDone { + finalResult := &client.GQLResult{ + Data: data, + Errors: errs, + } + + subscriptionAssert <- func() { + // This assert should be executed from the main test routine + // so that failures will be properly handled. + expectedErrorRaised := assertRequestResults( + s.ctx, + s.t, + s.testCase.Description, + finalResult, + action.Results, + action.ExpectedError, + // anyof is not yet supported by subscription requests + 0, + map[docFieldKey][]any{}, + ) + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + } + + return } - - return } - } - }() + }() + } - return subscriptionAssert, false + s.subscriptionResultsChans = append(s.subscriptionResultsChans, subscriptionAssert) } // Asserts as to whether an error has been raised as expected (or not). If an expected @@ -1145,7 +1392,7 @@ func AssertError(t *testing.T, description string, err error, expectedError stri } if expectedError == "" { - assert.NoError(t, err, description) + require.NoError(t, err, description) return false } else { if !strings.Contains(err.Error(), expectedError) { @@ -1165,14 +1412,14 @@ func AssertErrors( expectedError string, ) bool { if expectedError == "" { - assert.Empty(t, errs, description) + require.Empty(t, errs, description) } else { for _, e := range errs { // This is always a string at the moment, add support for other types as and when needed errorString := e.Error() if !strings.Contains(errorString, expectedError) { // We use ErrorIs for clearer failures (is a error comparison even if it is just a string) - assert.ErrorIs(t, errors.New(errorString), errors.New(expectedError)) + require.ErrorIs(t, errors.New(errorString), errors.New(expectedError)) continue } return true @@ -1247,30 +1494,29 @@ func assertExpectedErrorRaised(t *testing.T, description string, expectedError s } func assertIntrospectionResults( - ctx context.Context, - t *testing.T, - description string, - db client.DB, + s *state, action IntrospectionRequest, ) bool { - result := db.ExecRequest(ctx, action.Request) + for _, node := range getNodes(action.NodeID, s.nodes) { + result := node.DB.ExecRequest(s.ctx, action.Request) - if AssertErrors(t, description, result.GQL.Errors, action.ExpectedError) { - return true - } - resultantData := result.GQL.Data.(map[string]any) + if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { + return true + } + resultantData := result.GQL.Data.(map[string]any) - if len(action.ExpectedData) == 0 && len(action.ContainsData) == 0 { - require.Equal(t, action.ExpectedData, resultantData) - } + if len(action.ExpectedData) == 0 && len(action.ContainsData) == 0 { + require.Equal(s.t, action.ExpectedData, resultantData) + } - if len(action.ExpectedData) == 0 && len(action.ContainsData) > 0 { - assertContains(t, action.ContainsData, resultantData) - } else { - require.Equal(t, len(action.ExpectedData), len(resultantData)) + if len(action.ExpectedData) == 0 && len(action.ContainsData) > 0 { + assertContains(s.t, action.ContainsData, resultantData) + } else { + require.Equal(s.t, len(action.ExpectedData), len(resultantData)) - for k, result := range resultantData { - 
assert.Equal(t, action.ExpectedData[k], result) + for k, result := range resultantData { + assert.Equal(s.t, action.ExpectedData[k], result) + } } } @@ -1279,47 +1525,46 @@ func assertIntrospectionResults( // Asserts that the client introspection results conform to our expectations. func assertClientIntrospectionResults( - ctx context.Context, - t *testing.T, - description string, - db client.DB, + s *state, action ClientIntrospectionRequest, ) bool { - result := db.ExecRequest(ctx, action.Request) - - if AssertErrors(t, description, result.GQL.Errors, action.ExpectedError) { - return true - } - resultantData := result.GQL.Data.(map[string]any) - - if len(resultantData) == 0 { - return false - } + for _, node := range getNodes(action.NodeID, s.nodes) { + result := node.DB.ExecRequest(s.ctx, action.Request) - // Iterate through all types, validating each type definition. - // Inspired from buildClientSchema.ts from graphql-js, - // which is one way that clients do validate the schema. - types := resultantData["__schema"].(map[string]any)["types"].([]any) + if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { + return true + } + resultantData := result.GQL.Data.(map[string]any) - for _, typeData := range types { - typeDef := typeData.(map[string]any) - kind := typeDef["kind"].(string) + if len(resultantData) == 0 { + return false + } - switch kind { - case "SCALAR", "INTERFACE", "UNION", "ENUM": - // No validation for these types in this test - case "OBJECT": - fields := typeDef["fields"] - if fields == nil { - t.Errorf("Fields are missing for OBJECT type %v", typeDef["name"]) - } - case "INPUT_OBJECT": - inputFields := typeDef["inputFields"] - if inputFields == nil { - t.Errorf("InputFields are missing for INPUT_OBJECT type %v", typeDef["name"]) + // Iterate through all types, validating each type definition. + // Inspired from buildClientSchema.ts from graphql-js, + // which is one way that clients do validate the schema. 
+ types := resultantData["__schema"].(map[string]any)["types"].([]any) + + for _, typeData := range types { + typeDef := typeData.(map[string]any) + kind := typeDef["kind"].(string) + + switch kind { + case "SCALAR", "INTERFACE", "UNION", "ENUM": + // No validation for these types in this test + case "OBJECT": + fields := typeDef["fields"] + if fields == nil { + s.t.Errorf("Fields are missing for OBJECT type %v", typeDef["name"]) + } + case "INPUT_OBJECT": + inputFields := typeDef["inputFields"] + if inputFields == nil { + s.t.Errorf("InputFields are missing for INPUT_OBJECT type %v", typeDef["name"]) + } + default: + // t.Errorf("Unknown type kind: %v", kind) } - default: - // t.Errorf("Unknown type kind: %v", kind) } } @@ -1358,3 +1603,13 @@ func assertContains(t *testing.T, contains map[string]any, actual map[string]any } } } + +func assertBackupContent(t *testing.T, expectedContent, filepath string) { + b, err := os.ReadFile(filepath) + assert.NoError(t, err) + assert.Equal( + t, + expectedContent, + string(b), + ) +} diff --git a/tests/lenses/LICENSE b/tests/lenses/LICENSE new file mode 100644 index 0000000000..4e711136dc --- /dev/null +++ b/tests/lenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Source Network + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
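// The three wasm lens modules added below (set_default, remove, copy) each export
// alloc/set_param/transform and receive their parameters as a JSON object. The Go tests
// reference them through the module paths in tests/lenses/utils.go (later in this diff)
// and pass arguments shaped like tests/lenses/verified_true.json. A sketch of building
// such arguments (assumes `import "github.com/sourcenetwork/defradb/tests/lenses"`); the
// surrounding migration-config type is not shown in this hunk, so only the argument map
// is illustrated:
func exampleSetDefaultLensArguments() (string, map[string]any) {
	// `dst` and `value` mirror the SetDefault module's Parameters struct: every document
	// passed through the transform gains `verified: true`.
	arguments := map[string]any{
		"dst":   "verified",
		"value": true,
	}
	return lenses.SetDefaultModulePath, arguments
}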
diff --git a/tests/lenses/Makefile b/tests/lenses/Makefile new file mode 100644 index 0000000000..7370a04b80 --- /dev/null +++ b/tests/lenses/Makefile @@ -0,0 +1,4 @@ +build: + cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_set_default/Cargo.toml" + cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_remove/Cargo.toml" + cargo build --target wasm32-unknown-unknown --manifest-path "./rust_wasm32_copy/Cargo.toml" diff --git a/tests/lenses/rust_wasm32_copy/Cargo.toml b/tests/lenses/rust_wasm32_copy/Cargo.toml new file mode 100644 index 0000000000..af081daf34 --- /dev/null +++ b/tests/lenses/rust_wasm32_copy/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "rust-wasm32-copy" +version = "0.1.0" +edition = "2018" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0.87" +lens_sdk = { version = "0.1.0", git = "https://github.com/lens-vm/lens.git" } + +[http] +multiplexing = false diff --git a/tests/lenses/rust_wasm32_copy/src/lib.rs b/tests/lenses/rust_wasm32_copy/src/lib.rs new file mode 100644 index 0000000000..2f2b407481 --- /dev/null +++ b/tests/lenses/rust_wasm32_copy/src/lib.rs @@ -0,0 +1,91 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::HashMap; +use std::sync::RwLock; +use std::error::Error; +use std::{fmt, error}; +use serde::Deserialize; + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +enum ModuleError { + ParametersNotSetError, + PropertyNotFoundError{requested: String}, +} + +impl error::Error for ModuleError { } + +impl fmt::Display for ModuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match &*self { + ModuleError::ParametersNotSetError => f.write_str("Parameters have not been set."), + ModuleError::PropertyNotFoundError { requested } => + write!(f, "The requested property was not found. Requested: {}", requested), + } + } +} + +#[derive(Deserialize, Clone)] +pub struct Parameters { + pub src: String, + pub dst: String, +} + +static PARAMETERS: RwLock<Option<Parameters>> = RwLock::new(None); + +#[no_mangle] +pub extern fn alloc(size: usize) -> *mut u8 { + lens_sdk::alloc(size) +} + +#[no_mangle] +pub extern fn set_param(ptr: *mut u8) -> *mut u8 { + match try_set_param(ptr) { + Ok(_) => lens_sdk::nil_ptr(), + Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes()) + } +} + +fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> { + let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)? + .ok_or(ModuleError::ParametersNotSetError)?; + + let mut dst = PARAMETERS.write()?; + *dst = Some(parameter); + Ok(()) } + +#[no_mangle] +pub extern fn transform(ptr: *mut u8) -> *mut u8 { + match try_transform(ptr) { + Ok(o) => match o { + Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json), + None => lens_sdk::nil_ptr(), + }, + Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes()) + } +} + +fn try_transform(ptr: *mut u8) -> Result<Option<Vec<u8>>, Box<dyn Error>> { + let mut input = match lens_sdk::try_from_mem::<HashMap<String, serde_json::Value>>(ptr)? { + Some(v) => v, + // Implementations of `transform` are free to handle nil however they like. In this + // implementation we chose to return nil given a nil input. + None => return Ok(None), + }; + + let params = PARAMETERS.read()? + .clone() + .ok_or(ModuleError::ParametersNotSetError)?
+ .clone(); + + let value = input.get_mut(&params.src) + .ok_or(ModuleError::PropertyNotFoundError{requested: params.src.clone()})? + .clone(); + + input.insert(params.dst, value); + + let result_json = serde_json::to_vec(&input)?; + Ok(Some(result_json)) +} diff --git a/tests/lenses/rust_wasm32_remove/Cargo.toml b/tests/lenses/rust_wasm32_remove/Cargo.toml new file mode 100644 index 0000000000..561cad7140 --- /dev/null +++ b/tests/lenses/rust_wasm32_remove/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "rust-wasm32-remove" +version = "0.1.0" +edition = "2018" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0.87" +lens_sdk = { version = "0.1.0", git = "https://github.com/lens-vm/lens.git" } + +[http] +multiplexing = false diff --git a/tests/lenses/rust_wasm32_remove/src/lib.rs b/tests/lenses/rust_wasm32_remove/src/lib.rs new file mode 100644 index 0000000000..5bf2edb15f --- /dev/null +++ b/tests/lenses/rust_wasm32_remove/src/lib.rs @@ -0,0 +1,79 @@ +use std::collections::HashMap; +use std::sync::RwLock; +use std::error::Error; +use std::{fmt, error}; +use serde::Deserialize; + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] +enum ModuleError { + ParametersNotSetError, +} + +impl error::Error for ModuleError { } + +impl fmt::Display for ModuleError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match &*self { + ModuleError::ParametersNotSetError => f.write_str("Parameters have not been set."), + } + } +} + +#[derive(Deserialize, Clone)] +pub struct Parameters { + pub target: String, +} + +static PARAMETERS: RwLock<Option<Parameters>> = RwLock::new(None); + +#[no_mangle] +pub extern fn alloc(size: usize) -> *mut u8 { + lens_sdk::alloc(size) +} + +#[no_mangle] +pub extern fn set_param(ptr: *mut u8) -> *mut u8 { + match try_set_param(ptr) { + Ok(_) => lens_sdk::nil_ptr(), + Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes()) + } +} + +fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> { + let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)? + .ok_or(ModuleError::ParametersNotSetError)?; + + let mut dst = PARAMETERS.write()?; + *dst = Some(parameter); + Ok(()) +} + +#[no_mangle] +pub extern fn transform(ptr: *mut u8) -> *mut u8 { + match try_transform(ptr) { + Ok(o) => match o { + Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json), + None => lens_sdk::nil_ptr(), + }, + Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes()) + } +} + +fn try_transform(ptr: *mut u8) -> Result<Option<Vec<u8>>, Box<dyn Error>> { + let mut input = match lens_sdk::try_from_mem::<HashMap<String, serde_json::Value>>(ptr)? { + Some(v) => v, + // Implementations of `transform` are free to handle nil however they like. In this + // implementation we chose to return nil given a nil input. + None => return Ok(None), + }; + + let params = PARAMETERS.read()? + .clone() + .ok_or(ModuleError::ParametersNotSetError)?
+        .clone();
+
+    input.remove(&params.target);
+
+    let result_json = serde_json::to_vec(&input.clone())?;
+    Ok(Some(result_json))
+}
diff --git a/tests/lenses/rust_wasm32_set_default/Cargo.toml b/tests/lenses/rust_wasm32_set_default/Cargo.toml
new file mode 100644
index 0000000000..1798d3146b
--- /dev/null
+++ b/tests/lenses/rust_wasm32_set_default/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "rust-wasm32-set-default"
+version = "0.1.0"
+edition = "2018"
+
+[lib]
+crate-type = ["cdylib"]
+
+[dependencies]
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0.87"
+lens_sdk = { version = "0.1.0", git = "https://github.com/lens-vm/lens.git" }
+
+[http]
+multiplexing = false
diff --git a/tests/lenses/rust_wasm32_set_default/src/lib.rs b/tests/lenses/rust_wasm32_set_default/src/lib.rs
new file mode 100644
index 0000000000..fec61e422e
--- /dev/null
+++ b/tests/lenses/rust_wasm32_set_default/src/lib.rs
@@ -0,0 +1,80 @@
+use std::collections::HashMap;
+use std::sync::RwLock;
+use std::error::Error;
+use std::{fmt, error};
+use serde::Deserialize;
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
+enum ModuleError {
+    ParametersNotSetError,
+}
+
+impl error::Error for ModuleError { }
+
+impl fmt::Display for ModuleError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match &*self {
+            ModuleError::ParametersNotSetError => f.write_str("Parameters have not been set."),
+        }
+    }
+}
+
+#[derive(Deserialize, Clone)]
+pub struct Parameters {
+    pub dst: String,
+    pub value: serde_json::Value,
+}
+
+static PARAMETERS: RwLock<Option<Parameters>> = RwLock::new(None);
+
+#[no_mangle]
+pub extern fn alloc(size: usize) -> *mut u8 {
+    lens_sdk::alloc(size)
+}
+
+#[no_mangle]
+pub extern fn set_param(ptr: *mut u8) -> *mut u8 {
+    match try_set_param(ptr) {
+        Ok(_) => lens_sdk::nil_ptr(),
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_set_param(ptr: *mut u8) -> Result<(), Box<dyn Error>> {
+    let parameter = lens_sdk::try_from_mem::<Parameters>(ptr)?
+        .ok_or(ModuleError::ParametersNotSetError)?;
+
+    let mut dst = PARAMETERS.write()?;
+    *dst = Some(parameter);
+    Ok(())
+}
+
+#[no_mangle]
+pub extern fn transform(ptr: *mut u8) -> *mut u8 {
+    match try_transform(ptr) {
+        Ok(o) => match o {
+            Some(result_json) => lens_sdk::to_mem(lens_sdk::JSON_TYPE_ID, &result_json),
+            None => lens_sdk::nil_ptr(),
+        },
+        Err(e) => lens_sdk::to_mem(lens_sdk::ERROR_TYPE_ID, &e.to_string().as_bytes())
+    }
+}
+
+fn try_transform(ptr: *mut u8) -> Result<Option<Vec<u8>>, Box<dyn Error>> {
+    let mut input = match lens_sdk::try_from_mem::<HashMap<String, serde_json::Value>>(ptr)? {
+        Some(v) => v,
+        // Implementations of `transform` are free to handle nil however they like. In this
+        // implementation we chose to return nil given a nil input.
+        None => return Ok(None),
+    };
+
+    let params = PARAMETERS.read()?
+        .clone()
+        .ok_or(ModuleError::ParametersNotSetError)?
+        .clone();
+
+    input.insert(params.dst, params.value);
+
+    let result_json = serde_json::to_vec(&input.clone())?;
+    Ok(Some(result_json))
+}
diff --git a/tests/lenses/utils.go b/tests/lenses/utils.go
new file mode 100644
index 0000000000..cfb066db81
--- /dev/null
+++ b/tests/lenses/utils.go
@@ -0,0 +1,49 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package lenses
+
+import (
+	"path"
+	"runtime"
+)
+
+// SetDefaultModulePath is the path to the `SetDefault` lens module compiled to wasm.
+//
+// The module has two parameters:
+// - `dst` is a string and is the name of the property you wish to set
+// - `value` can be any valid json value and is the value that you wish the `dst` property
+//   of all documents being transformed by this module to have.
+var SetDefaultModulePath string = getPathRelativeToProjectRoot(
+	"/tests/lenses/rust_wasm32_set_default/target/wasm32-unknown-unknown/debug/rust_wasm32_set_default.wasm",
+)
+
+// RemoveModulePath is the path to the `Remove` lens module compiled to wasm.
+//
+// The module has one parameter:
+// - `target` is a string and is the name of the property you wish to remove.
+var RemoveModulePath string = getPathRelativeToProjectRoot(
+	"/tests/lenses/rust_wasm32_remove/target/wasm32-unknown-unknown/debug/rust_wasm32_remove.wasm",
+)
+
+// CopyModulePath is the path to the `Copy` lens module compiled to wasm.
+//
+// The module has two parameters:
+// - `src` is a string and is the name of the property you wish to copy values from.
+// - `dst` is a string and is the name of the property you wish to copy the `src` value to.
+var CopyModulePath string = getPathRelativeToProjectRoot(
+	"/tests/lenses/rust_wasm32_copy/target/wasm32-unknown-unknown/debug/rust_wasm32_copy.wasm",
+)
+
+func getPathRelativeToProjectRoot(relativePath string) string {
+	_, filename, _, _ := runtime.Caller(0)
+	root := path.Dir(path.Dir(path.Dir(filename)))
+	return path.Join(root, relativePath)
+}
diff --git a/tests/lenses/verified_true.json b/tests/lenses/verified_true.json
new file mode 100644
index 0000000000..2091620658
--- /dev/null
+++ b/tests/lenses/verified_true.json
@@ -0,0 +1,11 @@
+{
+  "lenses": [
+    {
+      "path": "/tests/modules/rust_wasm32rust_wasm32_set_default_simple/target/wasm32-unknown-unknown/debug/rust_wasm32_set_default.wasm",
+      "arguments": {
+        "dst": "verified",
+        "value": true
+      }
+    }
+  ]
+}
diff --git a/tools/cloud/aws/packer/build_aws_ami.pkr.hcl b/tools/cloud/aws/packer/build_aws_ami.pkr.hcl
index 36b6d5a753..8afacfb339 100644
--- a/tools/cloud/aws/packer/build_aws_ami.pkr.hcl
+++ b/tools/cloud/aws/packer/build_aws_ami.pkr.hcl
@@ -66,8 +66,8 @@ build {
     inline = [
       "/usr/bin/cloud-init status --wait",
       "sudo apt-get update && sudo apt-get install make build-essential -y",
-      "curl -OL https://golang.org/dl/go1.19.8.linux-amd64.tar.gz",
-      "rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.19.8.linux-amd64.tar.gz",
+      "curl -OL https://golang.org/dl/go1.20.6.linux-amd64.tar.gz",
+      "rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.20.6.linux-amd64.tar.gz",
       "export PATH=$PATH:/usr/local/go/bin",
      "git clone \"https://git@$DEFRADB_GIT_REPO\"",
      "cd ./defradb || { printf \"\\\ncd into defradb failed.\\\n\" && exit 2; }",
diff --git a/tools/configs/golangci.yaml b/tools/configs/golangci.yaml
index 02c5c1d694..448d334596 100644
--- a/tools/configs/golangci.yaml
+++ b/tools/configs/golangci.yaml
@@ -57,7 +57,7 @@ run:
 
   # Define the Go version limit.
   # Default: use Go version from the go.mod file, fallback on the env var `GOVERSION`.
-  go: "1.19"
+  go: "1.20"
 
 #=====================================================================================[ Output Configuration Options ]
 output:
@@ -263,7 +263,7 @@ linters-settings:
 
   gosimple:
     # Select the Go version to target.
-    go: "1.19"
+    go: "1.20"
     # https://staticcheck.io/docs/options#checks
     checks: ["all", "-S1038"]
     # Turn on all except (these are disabled):
@@ -355,13 +355,13 @@ linters-settings:
   staticcheck:
     # Select the Go version to target.
-    go: "1.19"
+    go: "1.20"
 
     # https://staticcheck.io/docs/options#checks
     checks: ["all"]
 
   unused:
     # Select the Go version to target.
-    go: "1.19"
+    go: "1.20"
 
   whitespace:
     # Enforces newlines (or comments) after every multi-line if statement.
diff --git a/tools/defradb-builder.containerfile b/tools/defradb-builder.containerfile
deleted file mode 100644
index 3daadb9b61..0000000000
--- a/tools/defradb-builder.containerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-# syntax=docker/dockerfile:1
-
-# An image with defradb's code and go tooling available, to assemble in a larger container.
-
-FROM docker.io/golang:1.19 AS BUILD
-
-WORKDIR /lib/defradb/
-
-COPY . .
-RUN make deps:modules
diff --git a/tools/defradb.containerfile b/tools/defradb.containerfile
index 77b5bc9bb1..c2cbaeb80e 100644
--- a/tools/defradb.containerfile
+++ b/tools/defradb.containerfile
@@ -4,7 +4,7 @@
 
 # Stage: BUILD
 # Several steps are involved to enable caching and because of the behavior of COPY regarding directories.
-FROM docker.io/golang:1.19 AS BUILD
+FROM docker.io/golang:1.20 AS BUILD
 WORKDIR /repo/
 COPY go.mod go.sum Makefile ./
 RUN make deps:modules
@@ -12,7 +12,7 @@ COPY . .
 RUN make build
 
 # Stage: RUN
-FROM gcr.io/distroless/base-debian11
+FROM debian:bookworm-slim
 COPY --from=BUILD /repo/build/defradb /defradb
 
 # Documents which ports are normally used.
diff --git a/tools/scripts/scripts_test.sh b/tools/scripts/scripts_test.sh
index 327ac4f50e..71c24fa787 100755
--- a/tools/scripts/scripts_test.sh
+++ b/tools/scripts/scripts_test.sh
@@ -23,22 +23,33 @@ TestReturnCode() {
 
 # Test the script that is responsible for the validation of pr title.
 readonly T1="./validate-conventional-style.sh"
+
 TestReturnCode "${T1}" 2;
+
 TestReturnCode "${T1} 'chore: This title has everything valid except that its too long'" 3;
 TestReturnCode "${T1} 'bot Bump github.com/alternativesourcenetwork/defradb from 1.1.0.1.0.0 to 1.1.0.1.0.1'" 3;
+
 TestReturnCode "${T1} 'chore: This title has more than one : colon'" 4;
 TestReturnCode "${T1} 'chore This title has no colon'" 4;
 TestReturnCode "${T1} 'bot Bump github.com/short/short from 1.2.3 to 1.2.4'" 4;
+
 TestReturnCode "${T1} 'feat: a'" 5;
 TestReturnCode "${T1} 'feat: '" 5;
 TestReturnCode "${T1} 'feat:'" 5;
+
 TestReturnCode "${T1} 'feat:There is no space between label & desc.'" 6;
 TestReturnCode "${T1} 'feat:there is no space between label & desc.'" 6;
+
 TestReturnCode "${T1} 'ci: lowercase first character after label'" 7;
+
 TestReturnCode "${T1} 'ci: Last character should not be period.'" 8;
+TestReturnCode "${T1} 'ci(i): Last character should not be period.'" 8;
 TestReturnCode "${T1} 'ci: Last character is a space '" 8;
 TestReturnCode "${T1} 'ci: Last character is a \\\`tick\\\`'" 8;
+
 TestReturnCode "${T1} 'bug: This is an invalid label'" 9;
+TestReturnCode "${T1} 'bug(i): This is an invalid label'" 9;
+
 TestReturnCode "${T1} 'ci: Last character is a number v1.5.0'" 0;
 TestReturnCode "${T1} 'ci: Last character is not lowercase alphabeT'" 0;
 TestReturnCode "${T1} 'chore: This is a valid title'" 0;
@@ -51,3 +62,14 @@ TestReturnCode "${T1} 'refactor: This is a valid title'" 0;
 TestReturnCode "${T1} 'test: This is a valid title'" 0;
 TestReturnCode "${T1} 'tools: This is a valid title'" 0;
 TestReturnCode "${T1} 'bot: Bump github.com/alternativesourcenetwork/defradb from 1.1.0.1.0.0 to 1.1.0.1.0.1'" 0;
+TestReturnCode "${T1} 'ci(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'chore(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'docs(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'feat(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'fix(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'perf(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'refactor(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'test(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'tools(i): Valid ignore title'" 0;
+TestReturnCode "${T1} 'bot(i): Bump github.com/alternativesourcenetwork/defradb from 1.1.0.1.0.0 to 1.1.0.1.0.1'" 0;
+TestReturnCode "${T1} 'bot(i): Bump githurk/defradb from 1.1.0.1.0.0 to 1.1.0.1.0.1'" 0;
diff --git a/tools/scripts/validate-conventional-style.sh b/tools/scripts/validate-conventional-style.sh
index 084af33c74..d19be1f00c 100755
--- a/tools/scripts/validate-conventional-style.sh
+++ b/tools/scripts/validate-conventional-style.sh
@@ -7,6 +7,9 @@
 # Usage: ./validate-conventional-style.sh "feat: Add a new feature"
 #========================================================================================
 
+readonly BOT_LABEL="bot";
+readonly IGNORE_DECORATOR="(i)";
+
 # Declare a non-mutable indexed array that contains all the subset of conventional style
 # labels that we deem valid for our use case. There should always be insync with the
 # labels we have defined for the change log in: `defradb/tools/configs/chglog/config.yml`.
@@ -19,33 +22,28 @@ readonly -a VALID_LABELS=("chore"
                           "refactor"
                           "test"
                           "tools"
-                          "bot");
-
-BOTPREFIX="bot"
+                          "${BOT_LABEL}");
 
 if [ "${#}" -ne 1 ]; then
     printf "Error: Invalid number of arguments (pass title as 1 string argument).\n";
     exit 2;
 fi
 
-TITLE=${1};
-IS_BOT=false;
+readonly TITLE="${1}";
 
-# Detect if title is prefixed with `bot`
-if [[ "${TITLE}" =~ ^"${BOTPREFIX}:" ]]; then
+# Detect if title is prefixed with `bot`, skips length validation if it is.
+if [[ "${TITLE}" =~ ^"${BOT_LABEL}:" ]] || [[ "${TITLE}" =~ ^"${BOT_LABEL}${IGNORE_DECORATOR}:" ]]; then
     printf "Info: Title is from a bot, skipping length-related title validation.\n";
-    IS_BOT=true;
-fi
 
 # Validate that the entire length of the title is less than or equal to our character limit.
-if [ "${#TITLE}" -gt 60 ] && [ "${IS_BOT}" = false ]; then
+elif [[ "${#TITLE}" -gt 60 ]]; then
     printf "Error: The length of the title is too long (should be 60 or less).\n";
     exit 3;
 fi
 
 # Split the title at ':' and store the result in ${SPLIT_TOKENS}.
 # Doing eval to ensure the split works for elements that contain spaces.
-eval "SPLIT_TOKENS=($(echo "\"$TITLE\"" | sed 's/:/" "/g'))";
+eval "SPLIT_TOKENS=($(echo "\"${TITLE}\"" | sed 's/:/" "/g'))";
 
 # Validate the `:` token exists exactly once.
 if [ "${#SPLIT_TOKENS[*]}" -ne 2 ]; then
@@ -53,8 +51,8 @@ if [ "${#SPLIT_TOKENS[*]}" -ne 2 ]; then
     exit 4;
 fi
 
-LABEL="${SPLIT_TOKENS[0]}";
-DESCRIPTION="${SPLIT_TOKENS[1]}";
+readonly LABEL="${SPLIT_TOKENS[0]}";
+readonly DESCRIPTION="${SPLIT_TOKENS[1]}";
 
 printf "Info: label = [%s]\n" "${LABEL}";
 printf "Info: description = [%s]\n" "${DESCRIPTION}";
@@ -65,9 +63,9 @@ if [ "${#DESCRIPTION}" -le 2 ]; then
     exit 5;
 fi
 
-CHECK_SPACE="${DESCRIPTION::1}"; # First character
-CHECK_FIRST_UPPER_CASE="${DESCRIPTION:1:1}"; # Second character
-CHECK_LAST_VALID="${DESCRIPTION: -1}"; # Last character
+readonly CHECK_SPACE="${DESCRIPTION::1}"; # First character
+readonly CHECK_FIRST_UPPER_CASE="${DESCRIPTION:1:1}"; # Second character
+readonly CHECK_LAST_VALID="${DESCRIPTION: -1}"; # Last character
 
 # Validate that there is a space between the label and description.
 if [ "${CHECK_SPACE}" != " " ]; then
@@ -92,6 +90,9 @@ for validLabel in "${VALID_LABELS[@]}"; do
     if [ "${LABEL}" == "${validLabel}" ]; then
         printf "Success: Title's label and description style is valid.\n";
         exit 0;
+    elif [ "${LABEL}" == "${validLabel}${IGNORE_DECORATOR}" ]; then
+        printf "Success: Title's label and description style is valid with ignore/internal decorator.\n";
+        exit 0;
     fi
 done
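
For readers unfamiliar with how the lens fixtures above fit together: verified_true.json illustrates the lens configuration shape (a "lenses" array whose entries pair a wasm module "path" with its "arguments"), and tests/lenses/utils.go exposes the compiled module paths together with their documented parameter names. The Go sketch below is illustrative only; the `Lens` and `LensConfig` struct names are not part of this changeset, it simply builds an equivalent configuration for the Copy module.

package main

import (
	"encoding/json"
	"fmt"
)

// Lens and LensConfig mirror the JSON shape used by verified_true.json:
// a list of lenses, each with a wasm module path and its arguments.
// These type names are hypothetical and exist only for this sketch.
type Lens struct {
	Path      string         `json:"path"`
	Arguments map[string]any `json:"arguments"`
}

type LensConfig struct {
	Lenses []Lens `json:"lenses"`
}

func main() {
	// The Copy module takes `src` and `dst` parameters, per the doc
	// comment on CopyModulePath in tests/lenses/utils.go.
	cfg := LensConfig{
		Lenses: []Lens{
			{
				Path: "/tests/lenses/rust_wasm32_copy/target/wasm32-unknown-unknown/debug/rust_wasm32_copy.wasm",
				Arguments: map[string]any{
					"src": "name",
					"dst": "fullName",
				},
			},
		},
	}

	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}

A configuration like this would give every transformed document a "fullName" property holding a copy of "name", matching the `src`/`dst` semantics documented on CopyModulePath.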
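The three Rust modules share the same alloc/set_param/transform plumbing and differ only in what `try_transform` does to the decoded document map. The Go sketch below restates just that per-document behaviour as plain map operations; it is a rough, hypothetical equivalent, not the real code path, since the actual modules run as wasm and exchange JSON across the lens-vm pointer ABI shown above.

package main

import (
	"encoding/json"
	"fmt"
)

type doc = map[string]any

// setDefault mirrors rust_wasm32_set_default: set `dst` to a fixed value.
func setDefault(d doc, dst string, value any) doc {
	d[dst] = value
	return d
}

// remove mirrors rust_wasm32_remove: drop the `target` property.
func remove(d doc, target string) doc {
	delete(d, target)
	return d
}

// copyField mirrors rust_wasm32_copy: copy the value of `src` into `dst`.
// The Rust module errors if `src` is missing; this sketch does the same.
func copyField(d doc, src, dst string) (doc, error) {
	v, ok := d[src]
	if !ok {
		return nil, fmt.Errorf("the requested property was not found. Requested: %s", src)
	}
	d[dst] = v
	return d, nil
}

func main() {
	d := doc{"name": "John"}
	d = setDefault(d, "verified", true)
	d, _ = copyField(d, "name", "alias")
	d = remove(d, "name")

	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // {"alias":"John","verified":true}
}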