diff --git a/nginx.conf b/nginx.conf index 8e170e494aeb..aa9a39ae2418 100644 --- a/nginx.conf +++ b/nginx.conf @@ -48,56 +48,107 @@ http { rewrite ^/docs/(?!(?:[a-zA-Z][a-zA-Z]|_next|img)(?:/|$))(.*)$ $scheme://$http_host/docs/en/$1 permanent; # Permanent redirects (301) - rewrite ^/docs/([a-zA-Z][a-zA-Z])/about/introduction/$ $scheme://$http_host/docs/$1/about/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/about/network/$ $scheme://$http_host/docs/$1/network/overview/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/curating/$ $scheme://$http_host/docs/$1/network/curating/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/delegating/$ $scheme://$http_host/docs/$1/network/delegating/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/network-transition-faq/$ $scheme://$http_host/docs/$1/arbitrum/arbitrum-faq/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/assemblyscript-api/$ $scheme://$http_host/docs/$1/developing/assemblyscript-api/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/assemblyscript-migration-guide/$ $scheme://$http_host/docs/$1/release-notes/assemblyscript-migration-guide/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/create-subgraph-hosted/$ $scheme://$http_host/docs/$1/developing/creating-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/define-subgraph-hosted/$ $scheme://$http_host/docs/$1/developing/defining-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/deprecating-a-subgraph/$ $scheme://$http_host/docs/$1/managing/deprecating-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/developer-faq/$ $scheme://$http_host/docs/$1/developing/developer-faqs/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/distributed-systems/$ $scheme://$http_host/docs/$1/querying/distributed-systems/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/graphql-api/$ $scheme://$http_host/docs/$1/querying/graphql-api/ permanent; - rewrite 
^/docs/([a-zA-Z][a-zA-Z])/developer/matchstick/$ $scheme://$http_host/docs/$1/developing/unit-testing-framework/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/publish-subgraph/$ $scheme://$http_host/docs/$1/publishing/publishing-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/query-the-graph/$ $scheme://$http_host/docs/$1/querying/querying-the-graph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/querying-best-practices/$ $scheme://$http_host/docs/$1/querying/querying-best-practices/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/querying-from-your-app/$ $scheme://$http_host/docs/$1/querying/querying-from-an-application/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/quick-start/$ $scheme://$http_host/docs/$1/quick-start/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/subgraph-debug-forking/$ $scheme://$http_host/docs/$1/cookbook/subgraph-debug-forking/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/explorer/$ $scheme://$http_host/docs/$1/network/explorer/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/hosted-service/deploy-subgraph-hosted/$ $scheme://$http_host/docs/$1/deploying/deploying-a-subgraph-to-hosted/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/hosted-service/migrating-subgraph/$ $scheme://$http_host/docs/$1/cookbook/migrating-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/hosted-service/query-hosted-service/$ $scheme://$http_host/docs/$1/querying/querying-the-hosted-service/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/hosted-service/what-is-hosted-service/$ $scheme://$http_host/docs/$1/deploying/hosted-service/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/indexing/$ $scheme://$http_host/docs/$1/network/indexing/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/billing/$ $scheme://$http_host/docs/$1/querying/billing/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/deploy-subgraph-studio/$ 
$scheme://$http_host/docs/$1/deploying/deploying-a-subgraph-to-studio/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/managing-api-keys/$ $scheme://$http_host/docs/$1/querying/managing-api-keys/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/multisig/$ $scheme://$http_host/docs/$1/cookbook/multisig/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/studio-faq/$ $scheme://$http_host/docs/$1/deploying/subgraph-studio-faqs/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/subgraph-studio/$ $scheme://$http_host/docs/$1/deploying/subgraph-studio/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/transferring-subgraph-ownership/$ $scheme://$http_host/docs/$1/managing/transferring-subgraph-ownership/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/arweave/$ $scheme://$http_host/docs/$1/cookbook/arweave/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/cosmos/$ $scheme://$http_host/docs/$1/cookbook/cosmos/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/near/$ $scheme://$http_host/docs/$1/cookbook/near/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/defining-a-subgraph/$ $scheme://$http_host/docs/$1/developing/creating-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/arbitrum-faq/$ $scheme://$http_host/docs/$1/arbitrum/arbitrum-faq/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/quick-start/$ $scheme://$http_host/docs/$1/quick-start/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developer/assemblyscript-api/$ $scheme://$http_host/docs/$1/developing/graph-ts/api/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/assemblyscript-api/$ $scheme://$http_host/docs/$1/developing/graph-ts/api/ permanent; - rewrite ^/docs/en/substreams/(?!index\.).+$ https://substreams.streamingfast.io permanent; - rewrite ^/docs/en/firehose/(?!index\.).+$ https://firehose.streamingfast.io permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/deploying-a-subgraph-to-studio/$ 
$scheme://$http_host/docs/$1/deploying/deploy-using-subgraph-studio/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/subgraph-studio/$ $scheme://$http_host/docs/$1/deploying/deploy-using-subgraph-studio/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/managing/deprecate-a-subgraph/$ $scheme://$http_host/docs/$1/managing/delete-a-subgraph/ permanent; - rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/$ $scheme://$http_host/docs/$1/developing/creating-a-subgraph/starting-your-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/about/introduction/$ $scheme://$http_host/docs/$1/about/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/arbitrum-faq/$ $scheme://$http_host/docs/$1/archived/arbitrum/arbitrum-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/arbitrum/l2-transfer-tools-faq/$ $scheme://$http_host/docs/$1/archived/arbitrum/l2-transfer-tools-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/arbitrum/l2-transfer-tools-guide/$ $scheme://$http_host/docs/$1/archived/arbitrum/l2-transfer-tools-guide/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/billing/$ $scheme://$http_host/docs/$1/subgraphs/billing/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/chain-integration-overview/$ $scheme://$http_host/docs/$1/indexing/chain-integration-overview/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/arweave/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/arweave/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/avoid-eth-calls/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/avoid-eth-calls/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/cosmos/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/cosmos/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/derivedfrom/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/derivedfrom/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/enums/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/enums/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/grafting-hotfix/$ 
$scheme://$http_host/docs/$1/subgraphs/cookbook/grafting-hotfix/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/grafting/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/grafting/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/immutable-entities-bytes-as-ids/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/immutable-entities-bytes-as-ids/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/near/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/near/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/pruning/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/pruning/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/quick-start/$ $scheme://$http_host/docs/$1/subgraphs/quick-start/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/subgraph-debug-forking/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/subgraph-debug-forking/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/subgraph-uncrashable/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/subgraph-uncrashable/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/substreams-powered-subgraphs/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/substreams-powered-subgraphs/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/timeseries/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/timeseries/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/cookbook/transfer-to-the-graph/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/transfer-to-the-graph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/curating/$ $scheme://$http_host/docs/$1/resources/roles/curating/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/delegating/$ $scheme://$http_host/docs/$1/resources/roles/delegating/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/deploy-using-subgraph-studio/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/using-subgraph-studio/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/deploying-a-subgraph-to-studio/$ 
$scheme://$http_host/docs/$1/subgraphs/developing/deploying/using-subgraph-studio/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/multiple-networks/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/multiple-networks/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/subgraph-studio-faqs/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/subgraph-studio-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/deploying/subgraph-studio/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/using-subgraph-studio/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/assemblyscript-api/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/graph-ts/api/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/starting-your-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/starting-your-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/starting-your-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/install-the-cli/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/install-the-cli/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/ql-schema/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/ql-schema/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/assemblyscript-mappings/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/assemblyscript-mappings/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/creating-a-subgraph/advanced/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/advanced/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/defining-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/starting-your-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/developer-faqs/$ 
$scheme://$http_host/docs/$1/subgraphs/developing/developer-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/graph-ts/api/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/graph-ts/api/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/graph-ts/CHANGELOG/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/graph-ts/CHANGELOG/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/graph-ts/common-issues/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/graph-ts/common-issues/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/graph-ts/README/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/graph-ts/README/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/substreams-powered-subgraphs-faq/$ $scheme://$http_host/docs/$1/substreams/sps/sps-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/supported-networks/$ $scheme://$http_host/docs/$1/supported-networks/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/developing/unit-testing-framework/$ $scheme://$http_host/docs/$1/subgraphs/developing/creating/unit-testing-framework/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/explorer/$ $scheme://$http_host/docs/$1/subgraphs/explorer/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/firehose/$ $scheme://$http_host/docs/$1/indexing/tooling/firehose/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/glossary/$ $scheme://$http_host/docs/$1/resources/glossary/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/graphcast/$ $scheme://$http_host/docs/$1/indexing/tooling/graphcast/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/indexing/$ $scheme://$http_host/docs/$1/indexing/overview/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/managing/delete-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/managing/deleting-a-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/managing/deprecate-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/managing/deleting-a-subgraph/ 
permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/managing/transfer-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/managing/transferring-a-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network-transition-faq/$ $scheme://$http_host/docs/$1/archived/arbitrum/arbitrum-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/benefits/$ $scheme://$http_host/docs/$1/resources/benefits/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/contracts/$ $scheme://$http_host/docs/$1/contracts/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/curating/$ $scheme://$http_host/docs/$1/resources/roles/curating/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/delegating/$ $scheme://$http_host/docs/$1/resources/roles/delegating/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/developing/$ $scheme://$http_host/docs/$1/subgraphs/developing/introduction/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/explorer/$ $scheme://$http_host/docs/$1/subgraphs/explorer/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/network/indexing/$ $scheme://$http_host/docs/$1/indexing/overview/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/new-chain-integration/$ $scheme://$http_host/docs/$1/indexing/new-chain-integration/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/operating-graph-node/$ $scheme://$http_host/docs/$1/indexing/tooling/graph-node/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/publishing/publishing-a-subgraph/$ $scheme://$http_host/docs/$1/subgraphs/developing/publishing/publishing-a-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/distributed-systems/$ $scheme://$http_host/docs/$1/subgraphs/querying/distributed-systems/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/graph-client/README/$ $scheme://$http_host/docs/$1/subgraphs/querying/graph-client/README/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/graph-client/architecture/$ 
$scheme://$http_host/docs/$1/subgraphs/querying/graph-client/architecture/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/graph-client/live/$ $scheme://$http_host/docs/$1/subgraphs/querying/graph-client/live/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/graphql-api/$ $scheme://$http_host/docs/$1/subgraphs/querying/graphql-api/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/managing-api-keys/$ $scheme://$http_host/docs/$1/subgraphs/querying/managing-api-keys/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/querying-best-practices/$ $scheme://$http_host/docs/$1/subgraphs/querying/best-practices/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/querying-by-subgraph-id-vs-deployment-id/$ $scheme://$http_host/docs/$1/subgraphs/querying/subgraph-id-vs-deployment-id/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/querying-from-an-application/$ $scheme://$http_host/docs/$1/subgraphs/querying/from-an-application/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/querying-the-graph/$ $scheme://$http_host/docs/$1/subgraphs/querying/introduction/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/querying/querying-with-python/$ $scheme://$http_host/docs/$1/subgraphs/querying/python/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/quick-start/$ $scheme://$http_host/docs/$1/subgraphs/quick-start/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/release-notes/assemblyscript-migration-guide/$ $scheme://$http_host/docs/$1/resources/release-notes/assemblyscript-migration-guide/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/release-notes/graphql-validations-migration-guide/$ $scheme://$http_host/docs/$1/resources/release-notes/graphql-validations-migration-guide/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/sps/introduction/$ $scheme://$http_host/docs/$1/substreams/sps/introduction/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/sps/triggers-example/$ $scheme://$http_host/docs/$1/substreams/sps/tutorial/ permanent; + rewrite 
^/docs/([a-zA-Z][a-zA-Z])/sps/triggers/$ $scheme://$http_host/docs/$1/substreams/sps/triggers/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/billing/$ $scheme://$http_host/docs/$1/subgraphs/billing/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/deploy-subgraph-studio/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/using-subgraph-studio/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/managing-api-keys/$ $scheme://$http_host/docs/$1/subgraphs/querying/managing-api-keys/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/multisig/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/multisig/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/studio-faq/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/subgraph-studio-faq/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/subgraph-studio/$ $scheme://$http_host/docs/$1/subgraphs/developing/deploying/using-subgraph-studio/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/studio/transferring-subgraph-ownership/$ $scheme://$http_host/docs/$1/subgraphs/developing/managing/transferring-a-subgraph/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/subgraphs/$ $scheme://$http_host/docs/$1/subgraphs/developing/subgraphs/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/substreams/$ $scheme://$http_host/docs/$1/substreams/introduction/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/sunrise/$ $scheme://$http_host/docs/$1/archived/sunrise/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-network-requirements/$ $scheme://$http_host/docs/$1/indexing/supported-network-requirements/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/arweave/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/arweave/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/cosmos/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/cosmos/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/supported-networks/near/$ $scheme://$http_host/docs/$1/subgraphs/cookbook/near/ permanent; + 
rewrite ^/docs/([a-zA-Z][a-zA-Z])/tap/$ $scheme://$http_host/docs/$1/indexing/tap/ permanent; + rewrite ^/docs/([a-zA-Z][a-zA-Z])/tokenomics/$ $scheme://$http_host/docs/$1/resources/tokenomics/ permanent; + # Temporary redirects (302) - rewrite ^/docs/en/querying/graph-client/$ $scheme://$http_host/docs/en/querying/graph-client/README/ redirect; - rewrite ^/docs/en/developing/graph-ts/$ $scheme://$http_host/docs/en/developing/graph-ts/README/ redirect; + rewrite ^/docs/en/querying/graph-client/$ $scheme://$http_host/docs/en/subgraphs/querying/graph-client/README/ redirect; + rewrite ^/docs/en/developing/graph-ts/$ $scheme://$http_host/docs/en/subgraphs/developing/creating/graph-ts/README/ redirect; location / { try_files $uri $uri.html $uri/index.html =404; diff --git a/packages/nextra-theme/package.json b/packages/nextra-theme/package.json index bc410dd6d7c8..6a9f14556e32 100644 --- a/packages/nextra-theme/package.json +++ b/packages/nextra-theme/package.json @@ -35,7 +35,7 @@ "theme-ui": "^0.16" }, "dependencies": { - "@docsearch/react": "^3.8.1", + "@docsearch/react": "^3.8.2", "@radix-ui/react-collapsible": "^1.1.2", "@radix-ui/react-visually-hidden": "^1.1.1", "lodash": "^4.17.21", @@ -43,8 +43,8 @@ "react-use": "^17.6.0" }, "devDependencies": { - "@edgeandnode/gds": "^5.39.1", - "@edgeandnode/go": "^6.74.0", + "@edgeandnode/gds": "^5.39.2", + "@edgeandnode/go": "^6.75.0", "@emotion/react": "^11.14.0", "@types/lodash": "^4.17.13", "@types/react": "^18.3.17", diff --git a/packages/nextra-theme/src/index.tsx b/packages/nextra-theme/src/index.tsx index 4872f4a79e1b..9866ecdfb480 100644 --- a/packages/nextra-theme/src/index.tsx +++ b/packages/nextra-theme/src/index.tsx @@ -199,7 +199,9 @@ export default function NextraLayout({ children, pageOpts, pageProps }: NextraTh {args.activePath.map((item) => item.title).join(' > ')} ) : null} - {frontMatter.title ? {frontMatter.title} : null} + {frontMatter.title || args.activeIndex === 0 ? ( + {args.activeIndex === 0 ? 
'The Graph Docs' : frontMatter.title} + ) : null} {lastUpdated || readingTime ? ( {lastUpdated ? ( diff --git a/packages/og-image/package.json b/packages/og-image/package.json index 14f0aebacb98..d12492d4b49d 100644 --- a/packages/og-image/package.json +++ b/packages/og-image/package.json @@ -16,12 +16,12 @@ "yoga-wasm-web": "0.3.3" }, "devDependencies": { - "@cloudflare/workers-types": "^4.20241216.0", + "@cloudflare/workers-types": "^4.20241218.0", "@types/react": "^18.3.17", "jest-image-snapshot": "^6.4.0", "tsx": "^4.19.2", "typescript": "^5.7.2", "vitest": "^1.6.0", - "wrangler": "^3.95.0" + "wrangler": "^3.97.0" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1acd88a0cacf..33fa06c0ea35 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -48,8 +48,8 @@ importers: packages/nextra-theme: dependencies: '@docsearch/react': - specifier: ^3.8.1 - version: 3.8.1(@algolia/client-search@5.17.1)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3) + specifier: ^3.8.2 + version: 3.8.2(@algolia/client-search@5.18.0)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3) '@radix-ui/react-collapsible': specifier: ^1.1.2 version: 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -67,11 +67,11 @@ importers: version: 17.6.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) devDependencies: '@edgeandnode/gds': - specifier: ^5.39.1 - version: 
5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + specifier: ^5.39.2 + version: 5.39.2(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@edgeandnode/go': - specifier: ^6.74.0 - version: 6.74.0(k3a5ck5km3cbp2dg3adcgi6xwe) + specifier: ^6.75.0 + version: 6.75.0(sgicwllcqwwzqtazwqh6qnnbhy) '@emotion/react': specifier: ^11.14.0 version: 11.14.0(@types/react@18.3.17)(react@18.3.1) @@ -104,7 +104,7 @@ importers: version: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1) tsup: specifier: ^8.3.5 - version: 8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1) + version: 8.3.5(jiti@1.21.7)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1) packages/og-image: dependencies: @@ -122,8 +122,8 @@ importers: version: 0.3.3 devDependencies: '@cloudflare/workers-types': - specifier: ^4.20241216.0 - version: 4.20241216.0 + specifier: ^4.20241218.0 + version: 4.20241218.0 '@types/react': specifier: ^18.3.17 version: 18.3.17 @@ 
-140,8 +140,8 @@ importers: specifier: ^1.6.0 version: 1.6.0(@types/node@22.10.2)(jsdom@24.1.3) wrangler: - specifier: ^3.95.0 - version: 3.95.0(@cloudflare/workers-types@4.20241216.0) + specifier: ^3.97.0 + version: 3.97.0(@cloudflare/workers-types@4.20241218.0) packages/remark-lint-restrict-elements: dependencies: @@ -155,14 +155,14 @@ importers: website: dependencies: '@edgeandnode/common': - specifier: ^6.38.0 - version: 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + specifier: ^6.39.0 + version: 6.39.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) '@edgeandnode/gds': - specifier: ^5.39.1 - version: 5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + specifier: ^5.39.2 + version: 5.39.2(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@edgeandnode/go': - specifier: ^6.74.0 - version: 6.74.0(k3a5ck5km3cbp2dg3adcgi6xwe) + specifier: ^6.75.0 + version: 
6.75.0(sgicwllcqwwzqtazwqh6qnnbhy) '@emotion/react': specifier: ^11.14.0 version: 11.14.0(@types/react@18.3.17)(react@18.3.1) @@ -176,8 +176,8 @@ importers: specifier: ^2.1.7 version: 2.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) mixpanel-browser: - specifier: ^2.56.0 - version: 2.56.0 + specifier: ^2.57.1 + version: 2.57.1 next: specifier: ^14.2.20 version: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -234,8 +234,8 @@ importers: specifier: ^8.4.49 version: 8.4.49 tailwindcss: - specifier: ^3.4.16 - version: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + specifier: ^3.4.17 + version: 3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) tsx: specifier: ^4.19.2 version: 4.19.2 @@ -265,56 +265,56 @@ packages: '@algolia/client-search': '>= 4.9.1 < 6' algoliasearch: '>= 4.9.1 < 6' - '@algolia/client-abtesting@5.17.1': - resolution: {integrity: sha512-Os/xkQbDp5A5RdGYq1yS3fF69GoBJH5FIfrkVh+fXxCSe714i1Xdl9XoXhS4xG76DGKm6EFMlUqP024qjps8cg==} + '@algolia/client-abtesting@5.18.0': + resolution: {integrity: sha512-DLIrAukjsSrdMNNDx1ZTks72o4RH/1kOn8Wx5zZm8nnqFexG+JzY4SANnCNEjnFQPJTTvC+KpgiNW/CP2lumng==} engines: {node: '>= 14.0.0'} - '@algolia/client-analytics@5.17.1': - resolution: {integrity: sha512-WKpGC+cUhmdm3wndIlTh8RJXoVabUH+4HrvZHC4hXtvCYojEXYeep8RZstatwSZ7Ocg6Y2u67bLw90NEINuYEw==} + '@algolia/client-analytics@5.18.0': + resolution: {integrity: sha512-0VpGG2uQW+h2aejxbG8VbnMCQ9ary9/ot7OASXi6OjE0SRkYQ/+pkW+q09+IScif3pmsVVYggmlMPtAsmYWHng==} engines: {node: '>= 14.0.0'} - '@algolia/client-common@5.17.1': - resolution: {integrity: sha512-5rb5+yPIie6912riAypTSyzbE23a7UM1UpESvD8GEPI4CcWQvA9DBlkRNx9qbq/nJ5pvv8VjZjUxJj7rFkzEAA==} + '@algolia/client-common@5.18.0': + resolution: {integrity: sha512-X1WMSC+1ve2qlMsemyTF5bIjwipOT+m99Ng1Tyl36ZjQKTa54oajBKE0BrmM8LD8jGdtukAgkUhFoYOaRbMcmQ==} engines: {node: '>= 14.0.0'} - '@algolia/client-insights@5.17.1': - resolution: {integrity: 
sha512-nb/tfwBMn209TzFv1DDTprBKt/wl5btHVKoAww9fdEVdoKK02R2KAqxe5tuXLdEzAsS+LevRyOM/YjXuLmPtjQ==} + '@algolia/client-insights@5.18.0': + resolution: {integrity: sha512-FAJRNANUOSs/FgYOJ/Njqp+YTe4TMz2GkeZtfsw1TMiA5mVNRS/nnMpxas9771aJz7KTEWvK9GwqPs0K6RMYWg==} engines: {node: '>= 14.0.0'} - '@algolia/client-personalization@5.17.1': - resolution: {integrity: sha512-JuNlZe1SdW9KbV0gcgdsiVkFfXt0mmPassdS3cBSGvZGbPB9JsHthD719k5Y6YOY4dGvw1JmC1i9CwCQHAS8hg==} + '@algolia/client-personalization@5.18.0': + resolution: {integrity: sha512-I2dc94Oiwic3SEbrRp8kvTZtYpJjGtg5y5XnqubgnA15AgX59YIY8frKsFG8SOH1n2rIhUClcuDkxYQNXJLg+w==} engines: {node: '>= 14.0.0'} - '@algolia/client-query-suggestions@5.17.1': - resolution: {integrity: sha512-RBIFIv1QE3IlAikJKWTOpd6pwE4d2dY6t02iXH7r/SLXWn0HzJtsAPPeFg/OKkFvWAXt0H7In2/Mp7a1/Dy2pw==} + '@algolia/client-query-suggestions@5.18.0': + resolution: {integrity: sha512-x6XKIQgKFTgK/bMasXhghoEjHhmgoP61pFPb9+TaUJ32aKOGc65b12usiGJ9A84yS73UDkXS452NjyP50Knh/g==} engines: {node: '>= 14.0.0'} - '@algolia/client-search@5.17.1': - resolution: {integrity: sha512-bd5JBUOP71kPsxwDcvOxqtqXXVo/706NFifZ/O5Rx5GB8ZNVAhg4l7aGoT6jBvEfgmrp2fqPbkdIZ6JnuOpGcw==} + '@algolia/client-search@5.18.0': + resolution: {integrity: sha512-qI3LcFsVgtvpsBGR7aNSJYxhsR+Zl46+958ODzg8aCxIcdxiK7QEVLMJMZAR57jGqW0Lg/vrjtuLFDMfSE53qA==} engines: {node: '>= 14.0.0'} - '@algolia/ingestion@1.17.1': - resolution: {integrity: sha512-T18tvePi1rjRYcIKhd82oRukrPWHxG/Iy1qFGaxCplgRm9Im5z96qnYOq75MSKGOUHkFxaBKJOLmtn8xDR+Mcw==} + '@algolia/ingestion@1.18.0': + resolution: {integrity: sha512-bGvJg7HnGGm+XWYMDruZXWgMDPVt4yCbBqq8DM6EoaMBK71SYC4WMfIdJaw+ABqttjBhe6aKNRkWf/bbvYOGyw==} engines: {node: '>= 14.0.0'} - '@algolia/monitoring@1.17.1': - resolution: {integrity: sha512-gDtow+AUywTehRP8S1tWKx2IvhcJOxldAoqBxzN3asuQobF7er5n72auBeL++HY4ImEuzMi7PDOA/Iuwxs2IcA==} + '@algolia/monitoring@1.18.0': + resolution: {integrity: 
sha512-lBssglINIeGIR+8KyzH05NAgAmn1BCrm5D2T6pMtr/8kbTHvvrm1Zvcltc5dKUQEFyyx3J5+MhNc7kfi8LdjVw==} engines: {node: '>= 14.0.0'} - '@algolia/recommend@5.17.1': - resolution: {integrity: sha512-2992tTHkRe18qmf5SP57N78kN1D3e5t4PO1rt10sJncWtXBZWiNOK6K/UcvWsFbNSGAogFcIcvIMAl5mNp6RWA==} + '@algolia/recommend@5.18.0': + resolution: {integrity: sha512-uSnkm0cdAuFwdMp4pGT5vHVQ84T6AYpTZ3I0b3k/M3wg4zXDhl3aCiY8NzokEyRLezz/kHLEEcgb/tTTobOYVw==} engines: {node: '>= 14.0.0'} - '@algolia/requester-browser-xhr@5.17.1': - resolution: {integrity: sha512-XpKgBfyczVesKgr7DOShNyPPu5kqlboimRRPjdqAw5grSyHhCmb8yoTIKy0TCqBABZeXRPMYT13SMruUVRXvHA==} + '@algolia/requester-browser-xhr@5.18.0': + resolution: {integrity: sha512-1XFjW0C3pV0dS/9zXbV44cKI+QM4ZIz9cpatXpsjRlq6SUCpLID3DZHsXyE6sTb8IhyPaUjk78GEJT8/3hviqg==} engines: {node: '>= 14.0.0'} - '@algolia/requester-fetch@5.17.1': - resolution: {integrity: sha512-EhUomH+DZP5vb6DnEjT0GvXaXBSwzZnuU6hPGNU1EYKRXDouRjII/bIWpVjt7ycMgL2D2oQruqDh6rAWUhQwRw==} + '@algolia/requester-fetch@5.18.0': + resolution: {integrity: sha512-0uodeNdAHz1YbzJh6C5xeQ4T6x5WGiUxUq3GOaT/R4njh5t78dq+Rb187elr7KtnjUmETVVuCvmEYaThfTHzNg==} engines: {node: '>= 14.0.0'} - '@algolia/requester-node-http@5.17.1': - resolution: {integrity: sha512-PSnENJtl4/wBWXlGyOODbLYm6lSiFqrtww7UpQRCJdsHXlJKF8XAP6AME8NxvbE0Qo/RJUxK0mvyEh9sQcx6bg==} + '@algolia/requester-node-http@5.18.0': + resolution: {integrity: sha512-tZCqDrqJ2YE2I5ukCQrYN8oiF6u3JIdCxrtKq+eniuLkjkO78TKRnXrVcKZTmfFJyyDK8q47SfDcHzAA3nHi6w==} engines: {node: '>= 14.0.0'} '@alloc/quick-lru@5.2.0': @@ -665,12 +665,8 @@ packages: cpu: [x64] os: [win32] - '@cloudflare/workers-shared@0.11.0': - resolution: {integrity: sha512-A+lQ8xp7992qSeMmuQ0ssL6CPmm+ZmAv6Ddikan0n1jjpMAic+97l7xtVIsswSn9iLMFPYQ9uNN/8Fl0AgARIQ==} - engines: {node: '>=16.7.0'} - - '@cloudflare/workers-types@4.20241216.0': - resolution: {integrity: sha512-PGIINXS+aE9vD2GYyWXfRG+VyxxceRkGDCoPxqwUweh1Bfv75HVotyL/adJ7mRVwh3XZDifGBdTaLReTT+Fcog==} + 
'@cloudflare/workers-types@4.20241218.0': + resolution: {integrity: sha512-Y0brjmJHcAZBXOPI7lU5hbiXglQWniA1kQjot2ata+HFimyjPPcz+4QWBRrmWcMPo0OadR2Vmac7WStDLpvz0w==} '@corex/deepmerge@4.0.43': resolution: {integrity: sha512-N8uEMrMPL0cu/bdboEWpQYb/0i2K5Qn8eCsxzOmxSggJbbQte7ljMRoXm917AbntqTGOzdTu+vP3KOOzoC70HQ==} @@ -679,11 +675,11 @@ packages: resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} engines: {node: '>=12'} - '@docsearch/css@3.8.1': - resolution: {integrity: sha512-XiPhKT+ghUi4pEi/ACE9iDmwWsLA6d6xSwtR5ab48iB63OtYWFLZHUKdH7jHKTmwOs0Eg22TX4Kb3H5liFm5bQ==} + '@docsearch/css@3.8.2': + resolution: {integrity: sha512-y05ayQFyUmCXze79+56v/4HpycYF3uFqB78pLPrSV5ZKAlDuIAAJNhaRi8tTdRNXh05yxX/TyNnzD6LwSM89vQ==} - '@docsearch/react@3.8.1': - resolution: {integrity: sha512-7vgQuktQNBQdNWO1jbkiwgIrTZ0r5nPIHqcO3Z2neAWgkdUuldvvMfEOEaPXT5lqcezEv7i0h+tC285nD3jpZg==} + '@docsearch/react@3.8.2': + resolution: {integrity: sha512-xCRrJQlTt8N9GU0DG4ptwHRkfnSnD/YpdeaXe02iKfqs97TkZJv60yE+1eq/tjPcVnTW8dP5qLP7itifFVV5eg==} peerDependencies: '@types/react': '>= 16.8.0 < 19.0.0' react: '>= 16.8.0 < 19.0.0' @@ -699,8 +695,8 @@ packages: search-insights: optional: true - '@edgeandnode/common@6.38.0': - resolution: {integrity: sha512-MghQ6o72e3bNtTuqUNiL2tZf5JPoY8gT2zXMRJeiLFaekzxF9CKHEghCG4JpqaIxsKqAcNZ9s/GLFDsyEg7MtA==} + '@edgeandnode/common@6.39.0': + resolution: {integrity: sha512-R1hm02HjD2fcaoanUKJy64p8M3OGG0RWyHI77+qtvo2u9faBaLqf/aI/+An75Yx82MTpAE4Wc2AKo4GrHYRoyQ==} '@edgeandnode/eslint-config@2.0.3': resolution: {integrity: sha512-I89EK3cJNmJqJH1zLwyoKFFP6lrOWnPnZDgo8/Ew7BpOOA1Qhqcu0ek6erAo+mDt/4/4hlEu0Agrewr80NcImA==} @@ -711,8 +707,8 @@ packages: typescript: optional: true - '@edgeandnode/gds@5.39.1': - resolution: {integrity: sha512-cJHuca1UcLT6Tu8kMbySusXzPWZJUI/Kq/F11jWeYpl4P63CJoVHJBea3+WavN4K5FwaIo/Op7OmIo4b9NBPTA==} + '@edgeandnode/gds@5.39.2': + resolution: {integrity: 
sha512-zfY3lVUffyEgY5UDi6gOEqrJ35IRKnoLXMg/WAAHX6bvkn8XSEwZeef/Bvs8+dROtgGvXyGaWGaelXrpf/OZKQ==} peerDependencies: '@emotion/react': ^11 dayjs: ^1.11 @@ -724,11 +720,11 @@ packages: next: optional: true - '@edgeandnode/go@6.74.0': - resolution: {integrity: sha512-VWzATmJ/4fcuTs6kWhkHOcnKN1NYNpMH3lw7bWx/KSkwzNnDEqyDmDD47gxG6mnaSSyPtPdTg7bf6jxyfrjg8A==} + '@edgeandnode/go@6.75.0': + resolution: {integrity: sha512-eLgJyM0q3VYLAELCK5y8iLIAZJdozAnB7o4TVuJt6R/bZhkhdJDRaasc0XAPxjIF5S7x6H4qdvFLoo8RoY1YUg==} peerDependencies: - '@edgeandnode/common': ^6.38.0 - '@edgeandnode/gds': ^5.39.1 + '@edgeandnode/common': ^6.39.0 + '@edgeandnode/gds': ^5.39.2 '@emotion/react': ^11 next: '>=13' react: ^18 @@ -2455,8 +2451,8 @@ packages: react: '>= 16.8' react-dom: '>= 16.8' - '@pinax/graph-networks-registry@0.6.5': - resolution: {integrity: sha512-Urm/C+phjJLP+W5OF7hCUMrlSqSNGfX8V2BmzXmVkME/NX1yBZCQinR3Zk2L2uedpOBjmp3o7vm7bdQy+DMHhA==} + '@pinax/graph-networks-registry@0.6.6': + resolution: {integrity: sha512-CTCxALFO5xny41yEVQqoR4GwIIyTPwkqLCPd6iiVnY64zQFDGLKbiVbIFqOuVH9Q6srx+pLqlnVQep/1zNeddg==} '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} @@ -2485,8 +2481,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-alert-dialog@1.1.3': - resolution: {integrity: sha512-5xzWppXTNZe6zFrTTwAJIoMJeZmdFe0l8ZqQrPGKAVvhdyOWR4r53/G7SZqx6/uf1J441oxK7GzmTkrrWDroHA==} + '@radix-ui/react-alert-dialog@1.1.4': + resolution: {integrity: sha512-A6Kh23qZDLy3PSU4bh2UJZznOrUdHImIXqF8YtUa6CN73f8EOO9XlXSCd9IHyPvIquTaa/kwaSWzZTtUvgXVGw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2555,8 +2551,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dialog@1.1.3': - resolution: {integrity: sha512-ujGvqQNkZ0J7caQyl8XuZRj2/TIrYcOGwqz5TeD1OMcCdfBuEMP0D12ve+8J5F9XuNUth3FAKFWo/wt0E/GJrQ==} + '@radix-ui/react-dialog@1.1.4': + resolution: {integrity: 
sha512-Ur7EV1IwQGCyaAuyDRiOLA5JIUZxELJljF+MbM/2NC0BYwfuRrbpS30BiQBJrVruscgUkieKkqXYDOoByaxIoA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2577,8 +2573,8 @@ packages: '@types/react': optional: true - '@radix-ui/react-dismissable-layer@1.1.2': - resolution: {integrity: sha512-kEHnlhv7wUggvhuJPkyw4qspXLJOdYoAP4dO2c8ngGuXTq1w/HZp1YeVB+NQ2KbH1iEG+pvOCGYSqh9HZOz6hg==} + '@radix-ui/react-dismissable-layer@1.1.3': + resolution: {integrity: sha512-onrWn/72lQoEucDmJnr8uczSNTujT0vJnA/X5+3AkChVPowr8n1yvIKIabhWyMQeMvvmdpsvcyDqx3X1LEXCPg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2590,8 +2586,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-dropdown-menu@2.1.3': - resolution: {integrity: sha512-eKyAfA9e4HOavzyGJC6kiDIlHMPzAU0zqSqTg+VwS0Okvb9nkTo7L4TugkCUqM3I06ciSpdtYQ73cgB7tyUgVw==} + '@radix-ui/react-dropdown-menu@2.1.4': + resolution: {integrity: sha512-iXU1Ab5ecM+yEepGAWK8ZhMyKX4ubFdCNtol4sT9D0OVErG9PNElfx3TQhjw7n7BC5nFVz68/5//clWy+8TXzA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2647,8 +2643,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-menu@2.1.3': - resolution: {integrity: sha512-wY5SY6yCiJYP+DMIy7RrjF4shoFpB9LJltliVwejBm8T2yepWDJgKBhIFYOGWYR/lFHOCtbstN9duZFu6gmveQ==} + '@radix-ui/react-menu@2.1.4': + resolution: {integrity: sha512-BnOgVoL6YYdHAG6DtXONaR29Eq4nvbi8rutrV/xlr3RQCMMb3yqP85Qiw/3NReozrSW+4dfLkK+rc1hb4wPU/A==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2660,8 +2656,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-navigation-menu@1.2.2': - resolution: {integrity: sha512-7wHxgyNzOjsexOHFTXGJK/RDhKgrqj0siWJpm5i+sb7h+A6auY7efph6eMg0kOU4sVCLcbhHK7ZVueAXxOzvZA==} + '@radix-ui/react-navigation-menu@1.2.3': + resolution: {integrity: sha512-IQWAsQ7dsLIYDrn0WqPU+cdM7MONTv9nqrLVYoie3BPiabSfUVDe6Fr+oEt0Cofsr9ONDcDe9xhmJbL1Uq1yKg==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2673,8 +2669,8 @@ 
packages: '@types/react-dom': optional: true - '@radix-ui/react-popover@1.1.3': - resolution: {integrity: sha512-MBDKFwRe6fi0LT8m/Jl4V8J3WbS/UfXJtsgg8Ym5w5AyPG3XfHH4zhBp1P8HmZK83T8J7UzVm6/JpDE3WMl1Dw==} + '@radix-ui/react-popover@1.1.4': + resolution: {integrity: sha512-aUACAkXx8LaFymDma+HQVji7WhvEhpFJ7+qPz17Nf4lLZqtreGOFRiNQWQmhzp7kEWg9cOyyQJpdIMUMPc/CPw==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2786,8 +2782,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-toast@1.2.3': - resolution: {integrity: sha512-oB8irs7CGAml6zWbum7MNySTH/sR7PM1ZQyLV8reO946u73sU83yZUKijrMLNbm4hTOrJY4tE8Oa/XUKrOr2Wg==} + '@radix-ui/react-toast@1.2.4': + resolution: {integrity: sha512-Sch9idFJHJTMH9YNpxxESqABcAFweJG4tKv+0zo0m5XBvUSL8FM5xKcJLFLXononpePs8IclyX1KieL5SDUNgA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -2799,8 +2795,8 @@ packages: '@types/react-dom': optional: true - '@radix-ui/react-tooltip@1.1.5': - resolution: {integrity: sha512-IucoQPcK5nwUuztaxBQvudvYwH58wtRcJlv1qvaMSyIbL9dEBfFN0vRf/D8xDbu6HmAJLlNGty4z8Na+vIqe9Q==} + '@radix-ui/react-tooltip@1.1.6': + resolution: {integrity: sha512-TLB5D8QLExS1uDn7+wH/bjEmRurNMTzNrtq7IjaS4kjion9NtzsTGkvR5+i7yc9q01Pi2KMM2cN3f8UG4IvvXA==} peerDependencies: '@types/react': '*' '@types/react-dom': '*' @@ -3980,8 +3976,8 @@ packages: resolution: {integrity: sha512-OME7WR6+5QwQs45A2079r+/FS0zU944+JCQwUX9GyIriCxqw2pGu4F9IEqmlwD+zSIMml0+MJnJJ47pFgSyWDw==} engines: {node: '>=10'} - '@uniswap/sdk-core@6.1.0': - resolution: {integrity: sha512-pJVv8rJZwemcp9xINFG7hjxM4H+1FNiDqjpxBabwpCBsBFKfJPwe65Wa8pk8p1yT3QOgA0yFEQuavNsmTdtJ7w==} + '@uniswap/sdk-core@6.1.1': + resolution: {integrity: sha512-S9D5NTn7vV+wYwXbKOmYVjJidgmKY6zUsG5KGlQO4fNvcIde1TtVgtMXJl06qv1JeJKbGnzkIAZG4R82lSVZCg==} engines: {node: '>=10'} '@uniswap/swap-router-contracts@1.3.1': @@ -4072,8 +4068,8 @@ packages: resolution: {integrity: 
sha512-t7kGrt2fdfNvzy1LCAE9/OnIyMtizgFhgJmk7iLJwQsLmR7S86F8Q4aDRPbCfo7pISJP6Fx/tPdfFNjHS23WTA==} engines: {node: '>=18.0.0'} - '@whatwg-node/server@0.9.63': - resolution: {integrity: sha512-rHBN2murCcuuhQru/AQjA13lA9SzQAH9k8ENy4iZrAmY+C0yFYPud3HiFgPUgzR1B2KYUpIYKwC1UAUlkzASOQ==} + '@whatwg-node/server@0.9.64': + resolution: {integrity: sha512-4HSOWOjFvPLY7F6zqs/kbSBHInHIxd50xnwtp3NXUrI+d92iOBLHKm9aIULwAn2ABPcnfXb55VQwb4bEV3g6KA==} engines: {node: '>=18.0.0'} '@xobotyi/scrollbar-width@1.9.5': @@ -4160,8 +4156,8 @@ packages: ajv@8.17.1: resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} - algoliasearch@5.17.1: - resolution: {integrity: sha512-3CcbT5yTWJDIcBe9ZHgsPi184SkT1kyZi3GWlQU5EFgvq1V73X2sqHRkPCQMe0RA/uvZbB+1sFeAk73eWygeLg==} + algoliasearch@5.18.0: + resolution: {integrity: sha512-/tfpK2A4FpS0o+S78o3YSdlqXr0MavJIDlFK3XZrlXLy7vaRXJvW5jYg3v5e/wCaF8y0IpMjkYLhoV6QqfpOgw==} engines: {node: '>= 14.0.0'} ansi-align@3.0.1: @@ -4624,8 +4620,8 @@ packages: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} - chokidar@4.0.1: - resolution: {integrity: sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA==} + chokidar@4.0.2: + resolution: {integrity: sha512-/b57FK+bblSU+dfewfFe0rT1YjVDfOmeLQwCAuC+vwvgLkXboATqqmy+Ipux6JrF6L5joe5CBnFOw+gLWH6yKg==} engines: {node: '>= 14.16.0'} chownr@2.0.0: @@ -5246,8 +5242,8 @@ packages: resolution: {integrity: sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==} engines: {node: '>=4'} - dunder-proto@1.0.0: - resolution: {integrity: sha512-9+Sj30DIu+4KvHqMfLUGLFYL2PkURSYMVXJyXe92nFRvlYq5hBjLEhblKB+vkd/WVlUYMWigiY07T91Fkk0+4A==} + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} engines: {node: '>= 
0.4'} duplexify@4.1.3: @@ -5259,8 +5255,8 @@ packages: ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} - electron-to-chromium@1.5.73: - resolution: {integrity: sha512-8wGNxG9tAG5KhGd3eeA0o6ixhiNdgr0DcHWm85XPCphwZgD1lIEoi6t3VERayWao7SF7AAZTw6oARGJeVjH8Kg==} + electron-to-chromium@1.5.74: + resolution: {integrity: sha512-ck3//9RC+6oss/1Bh9tiAVFy5vfSKbRHAFh7Z3/eTRkEqJeWgymloShB17Vg3Z4nmDNp35vAd1BZ6CMW4Wt6Iw==} elkjs@0.9.3: resolution: {integrity: sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==} @@ -5905,8 +5901,8 @@ packages: resolution: {integrity: sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==} engines: {node: '>=16'} - get-symbol-description@1.0.2: - resolution: {integrity: sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==} + get-symbol-description@1.1.0: + resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==} engines: {node: '>= 0.4'} get-tsconfig@4.8.1: @@ -6306,8 +6302,8 @@ packages: is-alphanumerical@2.0.1: resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} - is-array-buffer@3.0.4: - resolution: {integrity: sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==} + is-array-buffer@3.0.5: + resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==} engines: {node: '>= 0.4'} is-arrayish@0.2.1: @@ -6380,8 +6376,8 @@ packages: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} - is-finalizationregistry@1.1.0: - resolution: {integrity: 
sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA==} + is-finalizationregistry@1.1.1: + resolution: {integrity: sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==} engines: {node: '>= 0.4'} is-fullwidth-code-point@3.0.0: @@ -6493,8 +6489,8 @@ packages: resolution: {integrity: sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==} engines: {node: '>= 0.4'} - is-typed-array@1.1.13: - resolution: {integrity: sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==} + is-typed-array@1.1.14: + resolution: {integrity: sha512-lQUsHzcTb7rH57dajbOuZEuMDXjs9f04ZloER4QOpjpKcaw4f98BRUrs8aiO9Z4G7i7B0Xhgarg6SCgYcYi8Nw==} engines: {node: '>= 0.4'} is-unc-path@1.0.0: @@ -6516,8 +6512,8 @@ packages: resolution: {integrity: sha512-SXM8Nwyys6nT5WP6pltOwKytLV7FqQ4UiibxVmW+EIosHcmCqkkjViTb5SNssDlkCiEYRP1/pdWUKVvZBmsR2Q==} engines: {node: '>= 0.4'} - is-weakset@2.0.3: - resolution: {integrity: sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==} + is-weakset@2.0.4: + resolution: {integrity: sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==} engines: {node: '>= 0.4'} is-windows@1.0.2: @@ -6562,8 +6558,8 @@ packages: jest: optional: true - jiti@1.21.6: - resolution: {integrity: sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==} + jiti@1.21.7: + resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==} hasBin: true joycon@3.1.1: @@ -6672,8 +6668,8 @@ packages: resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==} engines: {node: '>=4.0'} - katex@0.16.15: - resolution: {integrity: 
sha512-yE9YJIEAk2aZ+FL/G8r+UGw0CTUzEA8ZFy6E+8tc3spHUKq3qBnzCkI1CQwGoI9atJhVyFPEypQsTY7mJ1Pi9w==} + katex@0.16.17: + resolution: {integrity: sha512-OyzSrXBllz+Jdc9Auiw0kt21gbZ4hkz8Q5srVAb2U9INcYIfGKbxe+bvNvEz1bQ/NrDeRRho5eLCyk/L03maAw==} hasBin: true keccak@3.0.4: @@ -7271,8 +7267,8 @@ packages: mitt@3.0.1: resolution: {integrity: sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==} - mixpanel-browser@2.56.0: - resolution: {integrity: sha512-GYeEz58pV2M9MZtK8vSPL4oJmCwGS08FDDRZvZwr5VJpWdT4Lgyg6zXhmNfCmSTEIw2coaarm7HZ4FL9dAVvnA==} + mixpanel-browser@2.57.1: + resolution: {integrity: sha512-Qd3S+hu1zocN+AQgKYYoBDYhFaRXaKXtVRLuaApB5j5Qhe25eqSDKtMzkki5AmmuXH68O9i2FW5HkgCWgzJLyg==} mkdirp@1.0.4: resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} @@ -8185,21 +8181,21 @@ packages: '@types/react': optional: true - react-remove-scroll@2.6.0: - resolution: {integrity: sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==} + react-remove-scroll@2.6.2: + resolution: {integrity: sha512-KmONPx5fnlXYJQqC62Q+lwIeAk64ws/cUw6omIumRzMRPqgnYqhSSti99nbj0Ry13bv7dF+BKn7NB+OqkdZGTw==} engines: {node: '>=10'} peerDependencies: - '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 - react: ^16.8.0 || ^17.0.0 || ^18.0.0 + '@types/react': '*' + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc peerDependenciesMeta: '@types/react': optional: true - react-smooth@4.0.3: - resolution: {integrity: sha512-PyxIrra8WZWrMRFcCiJsZ+JqFaxEINAt+v/w++wQKQlmO99Eh3+JTLeKApdTsLX2roBdWYXqPsaS8sO4UmdzIg==} + react-smooth@4.0.4: + resolution: {integrity: sha512-gnGKTpYwqL0Iii09gHobNolvX4Kiq4PKx6eWBCYYix+8cdw+cGo3do906l1NBPKkSWx1DghC1dlWG9L2uGd61Q==} peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 
react-style-singleton@2.2.3: resolution: {integrity: sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==} @@ -8981,8 +8977,8 @@ packages: tabbable@6.2.0: resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} - tailwindcss@3.4.16: - resolution: {integrity: sha512-TI4Cyx7gDiZ6r44ewaJmt0o6BrMCT5aK5e0rmJ/G9Xq3w7CX/5VXl/zIPEJZFUK5VEqwByyhqNPycPlvcK4ZNw==} + tailwindcss@3.4.17: + resolution: {integrity: sha512-w33E2aCvSDP0tW9RZuNXadXlkHXqFzSkQew/aIa2i/Sj8fThxwovwlXHSPXTbAHwEIhBFXAedUhP2tueAKP8Og==} engines: {node: '>=14.0.0'} hasBin: true @@ -9277,8 +9273,8 @@ packages: resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} engines: {node: '>= 0.4'} - typed-array-byte-length@1.0.1: - resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} + typed-array-byte-length@1.0.3: + resolution: {integrity: sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==} engines: {node: '>= 0.4'} typed-array-byte-offset@1.0.3: @@ -9337,8 +9333,8 @@ packages: resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==} engines: {node: '>=14.0'} - unenv-nightly@2.0.0-20241204-140205-a5d5190: - resolution: {integrity: sha512-jpmAytLeiiW01pl5bhVn9wYJ4vtiLdhGe10oXlJBuQEX8mxjxO8BlEXGHU4vr4yEikjFP1wsomTHt/CLU8kUwg==} + unenv-nightly@2.0.0-20241212-153011-af71c96: + resolution: {integrity: sha512-Yugb9yPs/EZsPOY+IHloqVVEcZeJ0uwwViTedsZjOtVeYO8I29B1rzU/p84FMT0R1Ht3bHsKkNV/rzrjSd07QA==} unicode-trie@2.0.0: resolution: {integrity: sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ==} @@ -9467,12 +9463,12 @@ packages: urlpattern-polyfill@10.0.0: resolution: {integrity: 
sha512-H/A06tKD7sS1O1X2SshBVeA5FLycRpjqiBeqGKmBwBDBy28EnRjORxTNe269KSSr5un5qyWi1iL61wLxpd+ZOg==} - use-callback-ref@1.3.2: - resolution: {integrity: sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==} + use-callback-ref@1.3.3: + resolution: {integrity: sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==} engines: {node: '>=10'} peerDependencies: - '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 - react: ^16.8.0 || ^17.0.0 || ^18.0.0 + '@types/react': '*' + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc peerDependenciesMeta: '@types/react': optional: true @@ -9740,9 +9736,10 @@ packages: workerpool@6.5.1: resolution: {integrity: sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==} - wrangler@3.95.0: - resolution: {integrity: sha512-3w5852i3FNyDz421K2Qk4v5L8jjwegO5O8E1+VAQmjnm82HFNxpIRUBq0bmM7CTLvOPI/Jjcmj/eAWjQBL7QYg==} + wrangler@3.97.0: + resolution: {integrity: sha512-NkFAigqZWe4NOK0gYROcpvdugaYJE/JRFrIZ+c5Q5/uie+25WH8OVbRvvmiXhWVhso56cZs2W2TPmAxT/sgHkw==} engines: {node: '>=16.17.0'} + deprecated: Downgrade to 3.96.0 hasBin: true peerDependencies: '@cloudflare/workers-types': ^4.20241205.0 @@ -9903,110 +9900,110 @@ packages: snapshots: - '@algolia/autocomplete-core@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3)': + '@algolia/autocomplete-core@1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-plugin-algolia-insights': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3) - '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) + '@algolia/autocomplete-plugin-algolia-insights': 1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)(search-insights@2.17.3) + '@algolia/autocomplete-shared': 
1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0) transitivePeerDependencies: - '@algolia/client-search' - algoliasearch - search-insights - '@algolia/autocomplete-plugin-algolia-insights@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3)': + '@algolia/autocomplete-plugin-algolia-insights@1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) + '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0) search-insights: 2.17.3 transitivePeerDependencies: - '@algolia/client-search' - algoliasearch - '@algolia/autocomplete-preset-algolia@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)': + '@algolia/autocomplete-preset-algolia@1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)': dependencies: - '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) - '@algolia/client-search': 5.17.1 - algoliasearch: 5.17.1 + '@algolia/autocomplete-shared': 1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0) + '@algolia/client-search': 5.18.0 + algoliasearch: 5.18.0 - '@algolia/autocomplete-shared@1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)': + '@algolia/autocomplete-shared@1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)': dependencies: - '@algolia/client-search': 5.17.1 - algoliasearch: 5.17.1 + '@algolia/client-search': 5.18.0 + algoliasearch: 5.18.0 - '@algolia/client-abtesting@5.17.1': + '@algolia/client-abtesting@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/client-analytics@5.17.1': 
+ '@algolia/client-analytics@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/client-common@5.17.1': {} + '@algolia/client-common@5.18.0': {} - '@algolia/client-insights@5.17.1': + '@algolia/client-insights@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/client-personalization@5.17.1': + '@algolia/client-personalization@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/client-query-suggestions@5.17.1': + '@algolia/client-query-suggestions@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/client-search@5.17.1': + '@algolia/client-search@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + 
'@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/ingestion@1.17.1': + '@algolia/ingestion@1.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/monitoring@1.17.1': + '@algolia/monitoring@1.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/recommend@5.17.1': + '@algolia/recommend@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + '@algolia/client-common': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 - '@algolia/requester-browser-xhr@5.17.1': + '@algolia/requester-browser-xhr@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 + '@algolia/client-common': 5.18.0 - '@algolia/requester-fetch@5.17.1': + '@algolia/requester-fetch@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 + '@algolia/client-common': 5.18.0 - '@algolia/requester-node-http@5.17.1': + '@algolia/requester-node-http@5.18.0': dependencies: - '@algolia/client-common': 5.17.1 + '@algolia/client-common': 5.18.0 '@alloc/quick-lru@5.2.0': {} @@ -10436,12 +10433,7 @@ snapshots: '@cloudflare/workerd-windows-64@1.20241205.0': optional: true - '@cloudflare/workers-shared@0.11.0': - dependencies: - mime: 3.0.0 - zod: 3.24.1 - - '@cloudflare/workers-types@4.20241216.0': {} + 
'@cloudflare/workers-types@4.20241218.0': {} '@corex/deepmerge@4.0.43': {} @@ -10449,14 +10441,14 @@ snapshots: dependencies: '@jridgewell/trace-mapping': 0.3.9 - '@docsearch/css@3.8.1': {} + '@docsearch/css@3.8.2': {} - '@docsearch/react@3.8.1(@algolia/client-search@5.17.1)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3)': + '@docsearch/react@3.8.2(@algolia/client-search@5.18.0)(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(search-insights@2.17.3)': dependencies: - '@algolia/autocomplete-core': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1)(search-insights@2.17.3) - '@algolia/autocomplete-preset-algolia': 1.17.7(@algolia/client-search@5.17.1)(algoliasearch@5.17.1) - '@docsearch/css': 3.8.1 - algoliasearch: 5.17.1 + '@algolia/autocomplete-core': 1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0)(search-insights@2.17.3) + '@algolia/autocomplete-preset-algolia': 1.17.7(@algolia/client-search@5.18.0)(algoliasearch@5.18.0) + '@docsearch/css': 3.8.2 + algoliasearch: 5.18.0 optionalDependencies: '@types/react': 18.3.17 react: 18.3.1 @@ -10465,10 +10457,10 @@ snapshots: transitivePeerDependencies: - '@algolia/client-search' - '@edgeandnode/common@6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': + '@edgeandnode/common@6.39.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))': dependencies: '@ethersproject/providers': 5.7.2 - '@pinax/graph-networks-registry': 0.6.5 + '@pinax/graph-networks-registry': 0.6.6 '@uniswap/sdk-core': 5.9.0 '@uniswap/v3-core': 1.0.1 '@uniswap/v3-sdk': 3.19.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) @@ -10508,28 +10500,28 @@ snapshots: - eslint-plugin-import-x - supports-color - 
'@edgeandnode/gds@5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)': + '@edgeandnode/gds@5.39.2(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)': dependencies: - '@edgeandnode/common': 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + '@edgeandnode/common': 6.39.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) '@emotion/react': 11.14.0(@types/react@18.3.17)(react@18.3.1) '@figma/code-connect': 1.2.4 '@floating-ui/react-dom': 2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@headlessui/react': 2.2.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@phosphor-icons/react': 2.1.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-accordion': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-alert-dialog': 
1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-alert-dialog': 1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': 1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-direction': 1.1.0(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dropdown-menu': 2.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dropdown-menu': 2.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-label': 2.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-popover': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-popover': 1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-slider': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-slot': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-switch': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-toast': 1.2.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-tooltip': 
1.1.5(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-toast': 1.2.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-tooltip': 1.1.6(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@react-aria/utils': 3.26.0(react@18.3.1) - '@tailwindcss/container-queries': 0.1.1(tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))) + '@tailwindcss/container-queries': 0.1.1(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))) '@tanem/react-nprogress': 5.0.53(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1) '@theme-ui/css': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1)) @@ -10558,7 +10550,7 @@ snapshots: react-virtuoso: 4.12.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) recharts: 2.15.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) shiki: 1.24.2 - tailwindcss: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + tailwindcss: 3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) theme-ui: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1) typy: 3.3.0 universal-cookie: 7.2.2 @@ -10578,14 +10570,14 @@ snapshots: - typescript - utf-8-validate - '@edgeandnode/go@6.74.0(k3a5ck5km3cbp2dg3adcgi6xwe)': + '@edgeandnode/go@6.75.0(sgicwllcqwwzqtazwqh6qnnbhy)': dependencies: - '@edgeandnode/common': 6.38.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) - '@edgeandnode/gds': 
5.39.1(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) + '@edgeandnode/common': 6.39.0(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) + '@edgeandnode/gds': 5.39.2(@emotion/is-prop-valid@0.8.8)(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(dayjs@1.11.13)(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2))(next@14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(theme-ui@0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1))(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2) '@emotion/react': 11.14.0(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-collapsible': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - '@radix-ui/react-navigation-menu': 1.2.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': 1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-navigation-menu': 
1.2.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@theme-ui/core': 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1) classnames: 2.5.1 @@ -10593,7 +10585,7 @@ snapshots: motion: 11.15.0(@emotion/is-prop-valid@0.8.8)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - tailwindcss: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + tailwindcss: 3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) theme-ui: 0.17.1(@emotion/react@11.14.0(@types/react@18.3.17)(react@18.3.1))(react@18.3.1) optionalDependencies: next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -11888,7 +11880,7 @@ snapshots: '@graphql-mesh/types': 0.98.10(@graphql-mesh/store@0.98.10)(@graphql-tools/utils@10.6.4(graphql@16.10.0))(graphql@16.10.0)(tslib@2.8.1) '@graphql-mesh/utils': 0.98.10(@graphql-mesh/cross-helpers@0.4.9(graphql@16.10.0))(@graphql-mesh/types@0.98.10)(@graphql-tools/utils@10.6.4(graphql@16.10.0))(graphql@16.10.0)(tslib@2.8.1) '@graphql-tools/utils': 10.6.4(graphql@16.10.0) - '@whatwg-node/server': 0.9.63 + '@whatwg-node/server': 0.9.64 graphql: 16.10.0 graphql-yoga: 5.10.6(graphql@16.10.0) tslib: 2.8.1 @@ -12831,7 +12823,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@pinax/graph-networks-registry@0.6.5': {} + '@pinax/graph-networks-registry@0.6.6': {} '@pkgjs/parseargs@0.11.0': optional: true @@ -12859,12 +12851,12 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-alert-dialog@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + 
'@radix-ui/react-alert-dialog@1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dialog': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': 1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-slot': 1.1.1(@types/react@18.3.17)(react@18.3.1) react: 18.3.1 @@ -12922,12 +12914,12 @@ snapshots: optionalDependencies: '@types/react': 18.3.17 - '@radix-ui/react-dialog@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-dialog@1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) 
'@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) @@ -12939,7 +12931,7 @@ snapshots: aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.17)(react@18.3.1) + react-remove-scroll: 2.6.2(@types/react@18.3.17)(react@18.3.1) optionalDependencies: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) @@ -12950,7 +12942,7 @@ snapshots: optionalDependencies: '@types/react': 18.3.17 - '@radix-ui/react-dismissable-layer@1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-dismissable-layer@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) @@ -12963,13 +12955,13 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-dropdown-menu@2.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-dropdown-menu@2.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-menu': 2.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-menu': 2.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-primitive': 
2.0.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.17)(react@18.3.1) react: 18.3.1 @@ -13011,14 +13003,14 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-menu@2.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-menu@2.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-direction': 1.1.0(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) @@ -13032,19 +13024,19 @@ snapshots: aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.17)(react@18.3.1) + react-remove-scroll: 2.6.2(@types/react@18.3.17)(react@18.3.1) optionalDependencies: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - 
'@radix-ui/react-navigation-menu@1.2.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-navigation-menu@1.2.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-direction': 1.1.0(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -13059,12 +13051,12 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-popover@1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-popover@1.1.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 
1.1.1(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-focus-scope': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) @@ -13077,7 +13069,7 @@ snapshots: aria-hidden: 1.2.4 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - react-remove-scroll: 2.6.0(@types/react@18.3.17)(react@18.3.1) + react-remove-scroll: 2.6.2(@types/react@18.3.17)(react@18.3.1) optionalDependencies: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) @@ -13187,13 +13179,13 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-toast@1.2.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-toast@1.2.4(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-collection': 1.1.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 
1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-presence': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-primitive': 2.0.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -13207,12 +13199,12 @@ snapshots: '@types/react': 18.3.17 '@types/react-dom': 18.3.5(@types/react@18.3.17) - '@radix-ui/react-tooltip@1.1.5(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@radix-ui/react-tooltip@1.1.6(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: '@radix-ui/primitive': 1.1.1 '@radix-ui/react-compose-refs': 1.1.1(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-context': 1.1.1(@types/react@18.3.17)(react@18.3.1) - '@radix-ui/react-dismissable-layer': 1.1.2(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-id': 1.1.0(@types/react@18.3.17)(react@18.3.1) '@radix-ui/react-popper': 1.2.1(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@radix-ui/react-portal': 1.1.3(@types/react-dom@18.3.5(@types/react@18.3.17))(@types/react@18.3.17)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -14456,9 +14448,9 @@ snapshots: '@swc/counter': 0.1.3 tslib: 2.8.1 - 
'@tailwindcss/container-queries@0.1.1(tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)))': + '@tailwindcss/container-queries@0.1.1(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)))': dependencies: - tailwindcss: 3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) + tailwindcss: 3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)) '@tanem/react-nprogress@5.0.53(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: @@ -14873,7 +14865,7 @@ snapshots: tiny-invariant: 1.3.3 toformat: 2.0.0 - '@uniswap/sdk-core@6.1.0': + '@uniswap/sdk-core@6.1.1': dependencies: '@ethersproject/address': 5.7.0 '@ethersproject/bytes': 5.7.0 @@ -14914,7 +14906,7 @@ snapshots: dependencies: '@ethersproject/abi': 5.7.0 '@ethersproject/solidity': 5.7.0 - '@uniswap/sdk-core': 6.1.0 + '@uniswap/sdk-core': 6.1.1 '@uniswap/swap-router-contracts': 1.3.1(hardhat@2.14.1(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2))(typescript@5.7.2)) '@uniswap/v3-periphery': 1.4.4 '@uniswap/v3-staker': 1.0.0 @@ -15013,7 +15005,7 @@ snapshots: fast-querystring: 1.1.2 tslib: 2.8.1 - '@whatwg-node/server@0.9.63': + '@whatwg-node/server@0.9.64': dependencies: '@whatwg-node/disposablestack': 0.0.5 '@whatwg-node/fetch': 0.10.1 @@ -15102,21 +15094,21 @@ snapshots: json-schema-traverse: 1.0.0 require-from-string: 2.0.2 - algoliasearch@5.17.1: - dependencies: - '@algolia/client-abtesting': 5.17.1 - '@algolia/client-analytics': 5.17.1 - '@algolia/client-common': 5.17.1 - '@algolia/client-insights': 5.17.1 - '@algolia/client-personalization': 5.17.1 - '@algolia/client-query-suggestions': 5.17.1 - '@algolia/client-search': 5.17.1 - '@algolia/ingestion': 1.17.1 - '@algolia/monitoring': 1.17.1 - '@algolia/recommend': 5.17.1 - '@algolia/requester-browser-xhr': 5.17.1 - '@algolia/requester-fetch': 5.17.1 - '@algolia/requester-node-http': 5.17.1 + algoliasearch@5.18.0: + dependencies: + '@algolia/client-abtesting': 5.18.0 + 
'@algolia/client-analytics': 5.18.0 + '@algolia/client-common': 5.18.0 + '@algolia/client-insights': 5.18.0 + '@algolia/client-personalization': 5.18.0 + '@algolia/client-query-suggestions': 5.18.0 + '@algolia/client-search': 5.18.0 + '@algolia/ingestion': 1.18.0 + '@algolia/monitoring': 1.18.0 + '@algolia/recommend': 5.18.0 + '@algolia/requester-browser-xhr': 5.18.0 + '@algolia/requester-fetch': 5.18.0 + '@algolia/requester-node-http': 5.18.0 ansi-align@3.0.1: dependencies: @@ -15190,7 +15182,7 @@ snapshots: array-buffer-byte-length@1.0.1: dependencies: call-bind: 1.0.8 - is-array-buffer: 3.0.4 + is-array-buffer: 3.0.5 array-flatten@1.1.1: {} @@ -15253,7 +15245,7 @@ snapshots: es-abstract: 1.23.6 es-errors: 1.3.0 get-intrinsic: 1.2.6 - is-array-buffer: 3.0.4 + is-array-buffer: 3.0.5 as-table@1.0.55: dependencies: @@ -15469,7 +15461,7 @@ snapshots: browserslist@4.24.3: dependencies: caniuse-lite: 1.0.30001689 - electron-to-chromium: 1.5.73 + electron-to-chromium: 1.5.74 node-releases: 2.0.19 update-browserslist-db: 1.1.1(browserslist@4.24.3) @@ -15691,7 +15683,7 @@ snapshots: optionalDependencies: fsevents: 2.3.3 - chokidar@4.0.1: + chokidar@4.0.2: dependencies: readdirp: 4.0.2 @@ -16310,7 +16302,7 @@ snapshots: dset@3.1.4: {} - dunder-proto@1.0.0: + dunder-proto@1.0.1: dependencies: call-bind-apply-helpers: 1.0.1 es-errors: 1.3.0 @@ -16327,7 +16319,7 @@ snapshots: ee-first@1.1.1: {} - electron-to-chromium@1.5.73: {} + electron-to-chromium@1.5.74: {} elkjs@0.9.3: {} @@ -16415,7 +16407,7 @@ snapshots: es-to-primitive: 1.3.0 function.prototype.name: 1.1.7 get-intrinsic: 1.2.6 - get-symbol-description: 1.0.2 + get-symbol-description: 1.1.0 globalthis: 1.0.4 gopd: 1.2.0 has-property-descriptors: 1.0.2 @@ -16423,14 +16415,14 @@ snapshots: has-symbols: 1.1.0 hasown: 2.0.2 internal-slot: 1.1.0 - is-array-buffer: 3.0.4 + is-array-buffer: 3.0.5 is-callable: 1.2.7 is-data-view: 1.0.2 is-negative-zero: 2.0.3 is-regex: 1.2.1 is-shared-array-buffer: 1.0.3 is-string: 1.1.1 - 
is-typed-array: 1.1.13 + is-typed-array: 1.1.14 is-weakref: 1.1.0 math-intrinsics: 1.0.0 object-inspect: 1.13.3 @@ -16443,7 +16435,7 @@ snapshots: string.prototype.trimend: 1.0.9 string.prototype.trimstart: 1.0.8 typed-array-buffer: 1.0.2 - typed-array-byte-length: 1.0.1 + typed-array-byte-length: 1.0.3 typed-array-byte-offset: 1.0.3 typed-array-length: 1.0.7 unbox-primitive: 1.1.0 @@ -17365,7 +17357,7 @@ snapshots: get-intrinsic@1.2.6: dependencies: call-bind-apply-helpers: 1.0.1 - dunder-proto: 1.0.0 + dunder-proto: 1.0.1 es-define-property: 1.0.1 es-errors: 1.3.0 es-object-atoms: 1.0.0 @@ -17388,9 +17380,9 @@ snapshots: get-stream@8.0.1: {} - get-symbol-description@1.0.2: + get-symbol-description@1.1.0: dependencies: - call-bind: 1.0.8 + call-bound: 1.0.3 es-errors: 1.3.0 get-intrinsic: 1.2.6 @@ -17535,7 +17527,7 @@ snapshots: '@graphql-yoga/logger': 2.0.0 '@graphql-yoga/subscription': 5.0.2 '@whatwg-node/fetch': 0.10.1 - '@whatwg-node/server': 0.9.63 + '@whatwg-node/server': 0.9.64 dset: 3.1.4 graphql: 16.10.0 lru-cache: 10.4.3 @@ -17643,7 +17635,7 @@ snapshots: has-proto@1.2.0: dependencies: - dunder-proto: 1.0.0 + dunder-proto: 1.0.1 has-symbols@1.1.0: {} @@ -17978,9 +17970,10 @@ snapshots: is-alphabetical: 2.0.1 is-decimal: 2.0.1 - is-array-buffer@3.0.4: + is-array-buffer@3.0.5: dependencies: call-bind: 1.0.8 + call-bound: 1.0.3 get-intrinsic: 1.2.6 is-arrayish@0.2.1: {} @@ -18022,7 +18015,7 @@ snapshots: dependencies: call-bound: 1.0.3 get-intrinsic: 1.2.6 - is-typed-array: 1.1.13 + is-typed-array: 1.1.14 is-date-object@1.1.0: dependencies: @@ -18041,9 +18034,9 @@ snapshots: is-extglob@2.1.1: {} - is-finalizationregistry@1.1.0: + is-finalizationregistry@1.1.1: dependencies: - call-bind: 1.0.8 + call-bound: 1.0.3 is-fullwidth-code-point@3.0.0: {} @@ -18131,7 +18124,7 @@ snapshots: has-symbols: 1.1.0 safe-regex-test: 1.1.0 - is-typed-array@1.1.13: + is-typed-array@1.1.14: dependencies: which-typed-array: 1.1.16 @@ -18151,9 +18144,9 @@ snapshots: dependencies: 
call-bound: 1.0.3 - is-weakset@2.0.3: + is-weakset@2.0.4: dependencies: - call-bind: 1.0.8 + call-bound: 1.0.3 get-intrinsic: 1.2.6 is-windows@1.0.2: {} @@ -18201,7 +18194,7 @@ snapshots: rimraf: 2.7.1 ssim.js: 3.5.0 - jiti@1.21.6: {} + jiti@1.21.7: {} joycon@3.1.1: {} @@ -18314,7 +18307,7 @@ snapshots: object.assign: 4.1.5 object.values: 1.2.0 - katex@0.16.15: + katex@0.16.17: dependencies: commander: 8.3.0 @@ -18787,7 +18780,7 @@ snapshots: dayjs: 1.11.13 dompurify: 3.1.6 elkjs: 0.9.3 - katex: 0.16.15 + katex: 0.16.17 khroma: 2.1.0 lodash-es: 4.17.21 mdast-util-from-markdown: 1.3.1 @@ -18911,7 +18904,7 @@ snapshots: micromark-extension-math@2.1.2: dependencies: '@types/katex': 0.16.7 - katex: 0.16.15 + katex: 0.16.17 micromark-factory-space: 1.1.0 micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 @@ -19333,7 +19326,7 @@ snapshots: mitt@3.0.1: {} - mixpanel-browser@2.56.0: + mixpanel-browser@2.57.1: dependencies: rrweb: 2.0.0-alpha.13 @@ -19512,7 +19505,7 @@ snapshots: github-slugger: 2.0.0 graceful-fs: 4.2.11 gray-matter: 4.0.3 - katex: 0.16.15 + katex: 0.16.17 lodash.get: 4.4.2 next: 14.2.20(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-mdx-remote: 4.4.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -20027,11 +20020,11 @@ snapshots: postcss: 8.4.49 ts-node: 10.9.2(@types/node@22.10.2)(typescript@5.7.2) - postcss-load-config@6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1): + postcss-load-config@6.0.1(jiti@1.21.7)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1): dependencies: lilconfig: 3.1.3 optionalDependencies: - jiti: 1.21.6 + jiti: 1.21.7 postcss: 8.4.49 tsx: 4.19.2 yaml: 2.6.1 @@ -20280,18 +20273,18 @@ snapshots: optionalDependencies: '@types/react': 18.3.17 - react-remove-scroll@2.6.0(@types/react@18.3.17)(react@18.3.1): + react-remove-scroll@2.6.2(@types/react@18.3.17)(react@18.3.1): dependencies: react: 18.3.1 react-remove-scroll-bar: 2.3.8(@types/react@18.3.17)(react@18.3.1) react-style-singleton: 
2.2.3(@types/react@18.3.17)(react@18.3.1) tslib: 2.8.1 - use-callback-ref: 1.3.2(@types/react@18.3.17)(react@18.3.1) + use-callback-ref: 1.3.3(@types/react@18.3.17)(react@18.3.1) use-sidecar: 1.1.3(@types/react@18.3.17)(react@18.3.1) optionalDependencies: '@types/react': 18.3.17 - react-smooth@4.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + react-smooth@4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: fast-equals: 5.0.1 prop-types: 15.8.1 @@ -20394,7 +20387,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) react-is: 18.3.1 - react-smooth: 4.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react-smooth: 4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) recharts-scale: 0.4.5 tiny-invariant: 1.3.3 victory-vendor: 36.9.2 @@ -20403,7 +20396,7 @@ snapshots: dependencies: call-bind: 1.0.8 define-properties: 1.2.1 - dunder-proto: 1.0.0 + dunder-proto: 1.0.1 es-abstract: 1.23.6 es-errors: 1.3.0 get-intrinsic: 1.2.6 @@ -20435,7 +20428,7 @@ snapshots: '@types/katex': 0.16.7 hast-util-from-html-isomorphic: 2.0.0 hast-util-to-text: 4.0.2 - katex: 0.16.15 + katex: 0.16.17 unist-util-visit-parents: 6.0.1 vfile: 6.0.3 @@ -21265,7 +21258,7 @@ snapshots: tabbable@6.2.0: {} - tailwindcss@3.4.16(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)): + tailwindcss@3.4.17(ts-node@10.9.2(@types/node@22.10.2)(typescript@5.7.2)): dependencies: '@alloc/quick-lru': 5.2.0 arg: 5.0.2 @@ -21275,7 +21268,7 @@ snapshots: fast-glob: 3.3.2 glob-parent: 6.0.2 is-glob: 4.0.3 - jiti: 1.21.6 + jiti: 1.21.7 lilconfig: 3.1.3 micromatch: 4.0.8 normalize-path: 3.0.0 @@ -21482,17 +21475,17 @@ snapshots: tsort@0.0.1: {} - tsup@8.3.5(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1): + tsup@8.3.5(jiti@1.21.7)(postcss@8.4.49)(tsx@4.19.2)(typescript@5.7.2)(yaml@2.6.1): dependencies: bundle-require: 5.0.0(esbuild@0.24.0) cac: 6.7.14 - chokidar: 4.0.1 + chokidar: 4.0.2 consola: 3.2.3 debug: 4.4.0(supports-color@8.1.1) esbuild: 0.24.0 joycon: 3.1.1 
picocolors: 1.1.1 - postcss-load-config: 6.0.1(jiti@1.21.6)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1) + postcss-load-config: 6.0.1(jiti@1.21.7)(postcss@8.4.49)(tsx@4.19.2)(yaml@2.6.1) resolve-from: 5.0.0 rollup: 4.28.1 source-map: 0.8.0-beta.0 @@ -21572,15 +21565,15 @@ snapshots: dependencies: call-bind: 1.0.8 es-errors: 1.3.0 - is-typed-array: 1.1.13 + is-typed-array: 1.1.14 - typed-array-byte-length@1.0.1: + typed-array-byte-length@1.0.3: dependencies: call-bind: 1.0.8 for-each: 0.3.3 gopd: 1.2.0 has-proto: 1.2.0 - is-typed-array: 1.1.13 + is-typed-array: 1.1.14 typed-array-byte-offset@1.0.3: dependencies: @@ -21589,7 +21582,7 @@ snapshots: for-each: 0.3.3 gopd: 1.2.0 has-proto: 1.2.0 - is-typed-array: 1.1.13 + is-typed-array: 1.1.14 reflect.getprototypeof: 1.0.8 typed-array-length@1.0.7: @@ -21597,7 +21590,7 @@ snapshots: call-bind: 1.0.8 for-each: 0.3.3 gopd: 1.2.0 - is-typed-array: 1.1.13 + is-typed-array: 1.1.14 possible-typed-array-names: 1.0.0 reflect.getprototypeof: 1.0.8 @@ -21635,9 +21628,10 @@ snapshots: dependencies: '@fastify/busboy': 2.1.1 - unenv-nightly@2.0.0-20241204-140205-a5d5190: + unenv-nightly@2.0.0-20241212-153011-af71c96: dependencies: defu: 6.1.4 + mlly: 1.7.3 ohash: 1.1.4 pathe: 1.1.2 ufo: 1.5.4 @@ -21845,7 +21839,7 @@ snapshots: urlpattern-polyfill@10.0.0: {} - use-callback-ref@1.3.2(@types/react@18.3.17)(react@18.3.1): + use-callback-ref@1.3.3(@types/react@18.3.17)(react@18.3.1): dependencies: react: 18.3.1 tslib: 2.8.1 @@ -22087,7 +22081,7 @@ snapshots: has-tostringtag: 1.0.2 is-async-function: 2.0.0 is-date-object: 1.1.0 - is-finalizationregistry: 1.1.0 + is-finalizationregistry: 1.1.1 is-generator-function: 1.0.10 is-regex: 1.2.1 is-weakref: 1.1.0 @@ -22101,7 +22095,7 @@ snapshots: is-map: 2.0.3 is-set: 2.0.3 is-weakmap: 2.0.2 - is-weakset: 2.0.3 + is-weakset: 2.0.4 which-module@2.0.1: {} @@ -22158,14 +22152,13 @@ snapshots: workerpool@6.5.1: {} - wrangler@3.95.0(@cloudflare/workers-types@4.20241216.0): + 
wrangler@3.97.0(@cloudflare/workers-types@4.20241218.0): dependencies: '@cloudflare/kv-asset-handler': 0.3.4 - '@cloudflare/workers-shared': 0.11.0 '@esbuild-plugins/node-globals-polyfill': 0.2.3(esbuild@0.17.19) '@esbuild-plugins/node-modules-polyfill': 0.2.2(esbuild@0.17.19) blake3-wasm: 2.1.5 - chokidar: 4.0.1 + chokidar: 4.0.2 date-fns: 4.1.0 esbuild: 0.17.19 itty-time: 1.0.6 @@ -22175,11 +22168,11 @@ snapshots: resolve: 1.22.9 selfsigned: 2.4.1 source-map: 0.6.1 - unenv: unenv-nightly@2.0.0-20241204-140205-a5d5190 + unenv: unenv-nightly@2.0.0-20241212-153011-af71c96 workerd: 1.20241205.0 xxhash-wasm: 1.1.0 optionalDependencies: - '@cloudflare/workers-types': 4.20241216.0 + '@cloudflare/workers-types': 4.20241218.0 fsevents: 2.3.3 transitivePeerDependencies: - bufferutil diff --git a/website/package.json b/website/package.json index c2233f1de1ef..59630cce5734 100644 --- a/website/package.json +++ b/website/package.json @@ -15,14 +15,14 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@edgeandnode/common": "^6.38.0", - "@edgeandnode/gds": "^5.39.1", - "@edgeandnode/go": "^6.74.0", + "@edgeandnode/common": "^6.39.0", + "@edgeandnode/gds": "^5.39.2", + "@edgeandnode/go": "^6.75.0", "@emotion/react": "^11.14.0", "@graphprotocol/contracts": "6.2.1", "@graphprotocol/nextra-theme": "workspace:*", "@phosphor-icons/react": "^2.1.7", - "mixpanel-browser": "^2.56.0", + "mixpanel-browser": "^2.57.1", "next": "^14.2.20", "next-seo": "^6.6.0", "next-sitemap": "^4.2.3", @@ -43,7 +43,7 @@ "fast-xml-parser": "^4.5.1", "graphql": "^16.10.0", "postcss": "^8.4.49", - "tailwindcss": "^3.4.16", + "tailwindcss": "^3.4.17", "tsx": "^4.19.2", "unified": "^11.0.5" } diff --git a/website/pages/[locale]/index.mdx b/website/pages/[locale]/index.mdx index 03c25f78ccbb..d315a3c874ab 100644 --- a/website/pages/[locale]/index.mdx +++ b/website/pages/[locale]/index.mdx @@ -35,7 +35,7 @@ ${t('index.networkRoles.description')} ## ${t('index.supportedNetworks.title')} 
${t('index.supportedNetworks.description')} -${t('index.supportedNetworks.footer').replace('{0}', `[${t('index.supportedNetworks.title')}](/developing/supported-networks/)`)}` +${t('index.supportedNetworks.footer').replace('{0}', `[${t('index.supportedNetworks.title')}](/supported-networks/)`)}` const mdx = await buildDynamicMDX(rawMdx, { codeHighlight: false }) return { props: { diff --git a/website/pages/ar/about.mdx b/website/pages/ar/about.mdx index 7660b0dfd54b..8005f34aef5f 100644 --- a/website/pages/ar/about.mdx +++ b/website/pages/ar/about.mdx @@ -24,7 +24,7 @@ In the case of the example listed above, Bored Ape Yacht Club, you can perform b It would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer to these simple questions. -Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. +Alternatively, you have the option to set up your own server, process the transactions, store them in a database, and create an API endpoint to query the data. However, this option is [resource intensive](/resources/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. Blockchain properties, such as finality, chain reorganizations, and uncled blocks, add complexity to the process, making it time-consuming and conceptually challenging to retrieve accurate query results from blockchain data. 
diff --git a/website/pages/ar/arbitrum/_meta.js b/website/pages/ar/arbitrum/_meta.js deleted file mode 100644 index 321fe93849be..000000000000 --- a/website/pages/ar/arbitrum/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/arbitrum/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/arbitrum/arbitrum-faq.mdx b/website/pages/ar/arbitrum/arbitrum-faq.mdx deleted file mode 100644 index 2cf8402a7718..000000000000 --- a/website/pages/ar/arbitrum/arbitrum-faq.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: الأسئلة الشائعة حول Arbitrum ---- - -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. - -## Why did The Graph implement an L2 Solution? - -By scaling The Graph on L2, network participants can now benefit from: - -- Upwards of 26x savings on gas fees - -- سرعة أكبر في المعاملات - -- Security inherited from Ethereum - -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. - -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. - -## ما الذي يجب علي فعله لاستخدام The Graph في L2؟ - -The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. - -Consequently, to pay for queries, you need GRT on Arbitrum. 
Here are a few different ways to achieve this: - -- If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - - - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) - - [TransferTo](https://transferto.xyz/swap) - -- If you have other assets on Arbitrum, you can swap them for GRT through a swapping protocol like Uniswap. - -- Alternatively, you can acquire GRT directly on Arbitrum through a decentralized exchange. - -Once you have GRT on Arbitrum, you can add it to your billing balance. - -للاستفادة من استخدام The Graph على L2 ، استخدم قائمة المنسدلة للتبديل بين الشبكات. - -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) - -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? - -Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/arbitrum/l2-transfer-tools-guide/) for additional support. - -All indexing rewards are now entirely on Arbitrum. - -## Were there any risks associated with scaling the network to L2? - -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). - -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). - -## Are existing subgraphs on Ethereum working? - -All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. - -## Does GRT have a new smart contract deployed on Arbitrum? 
- -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. - -## الأسئلة الشائعة حول إعداد الفواتير في Arbitrum - -## ما الذي علي فعله بشأن ال GRT في حساب الفوترة الخاص بي ؟ - -لا شئ! لقد تم نقل GRT الخاصة بك بشكل آمن إلى Arbitrum ويتم استخدامها للدفع مقابل الاستعلامات. - -## كيف أعرف أن أموالي قد انتقلت بشكل آمن إلى Arbitrum؟ - -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). - -## كيف أعرف أن جسر Arbitrum آمن؟ - -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. - -## ماذا علي أن أفعل إذا قمت بإضافة GRT جديد من محفظة Ethereum mainnet الخاصة بي؟ - -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. - -Visit the [Billing page](/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx deleted file mode 100644 index 250f550bcacd..000000000000 --- a/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: الأسئلة الشائعة حول أدوات النقل L2 ---- - -## عام - -### ما هي أدوات النقل L2؟ - -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. 
- -For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. - -These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. - -### هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ - -إذا كنت تستخدم محفظة [EOA] (https://ethereum.org/en/developers/docs/accounts/#types-of-account) ، فيمكنك استخدام نفس العنوان. إذا كانت محفظة Ethereum mainnet الخاصة بك عبارة عن عقد (مثل multisig) ، فيجب عليك تحديد [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) حيث سيتم إرسال التحويل الخاص بك. يرجى التحقق من العنوان بعناية لأن أي تحويلات إلى عنوان غير صحيح يمكن أن تؤدي إلى خسارة غير قابلة للرجوع. إذا كنت ترغب في استخدام multisig على L2 ، فتأكد من نشر عقد multisig على Arbitrum One. - -Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. - -The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. - -### What happens if I don’t finish my transfer in 7 days? - -The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). 
- -When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). - -This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. - -### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? - -If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. 
- -If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. - -## نقل الـ Subgraph (الرسم البياني الفرعي) - -### كيفكيف أقوم بتحويل الـ subgraph الخاص بي؟ - - - -لنقل الـ subgraph الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: - -1. ابدأ التحويل على شبكة Ethereum mainnet - -2. انتظر 20 دقيقة للتأكيد - -3. قم بتأكيد نقل الـ subgraph على Arbitrum \ \* - -4. قم بإنهاء نشر الـ subgraph على Arbitrum - -5. جدث عنوان URL للاستعلام (مستحسن) - -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -### من أين يجب أن أبدأ التحويل ؟ - -يمكنك بدء عملية النقل من [Subgraph Studio] (https://thegraph.com/studio/) ، [Explorer ،] (https://thegraph.com/explorer) أو من أي صفحة تفاصيل subgraph. انقر فوق الزر "Transfer Subgraph" في صفحة تفاصيل الرسم الـ subgraph لبدء النقل. - -### كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي - -يستغرق وقت النقل حوالي 20 دقيقة. يعمل جسر Arbitrum في الخلفية لإكمال نقل الجسر تلقائيًا. في بعض الحالات ، قد ترتفع تكاليف الغاز وستحتاج إلى تأكيد المعاملة مرة أخرى. - -### هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ - -سيكون الـ subgraph الخاص بك قابلاً للاكتشاف على الشبكة التي تم نشرها عليها فقط. 
على سبيل المثال ، إذا كان الـ subgraph الخاص بك موجودًا على Arbitrum One ، فيمكنك العثور عليه فقط في Explorer على Arbitrum One ولن تتمكن من العثور عليه على Ethereum. يرجى التأكد من تحديد Arbitrum One في مبدل الشبكة في أعلى الصفحة للتأكد من أنك على الشبكة الصحيحة. بعد النقل ، سيظهر الـ L1 subgraph على أنه مهمل. - -### هل يلزم نشر الـ subgraph الخاص بي لنقله؟ - -للاستفادة من أداة نقل الـ subgraph ، يجب أن يكون الرسم البياني الفرعي الخاص بك قد تم نشره بالفعل على شبكة Ethereum الرئيسية ويجب أن يكون لديه إشارة تنسيق مملوكة للمحفظة التي تمتلك الرسم البياني الفرعي. إذا لم يتم نشر الرسم البياني الفرعي الخاص بك ، فمن المستحسن أن تقوم ببساطة بالنشر مباشرة على Arbitrum One - ستكون رسوم الغاز أقل بكثير. إذا كنت تريد نقل رسم بياني فرعي منشور ولكن حساب المالك لا يملك إشارة تنسيق عليه ، فيمكنك الإشارة بمبلغ صغير (على سبيل المثال 1 GRT) من ذلك الحساب ؛ تأكد من اختيار إشارة "auto-migrating". - -### ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد أن النقل إلى Arbitrum؟ - -بعد نقل الرسم البياني الفرعي الخاص بك إلى Arbitrum ، سيتم إهمال إصدار Ethereum mainnet. نوصي بتحديث عنوان URL للاستعلام في غضون 48 ساعة. ومع ذلك ، هناك فترة سماح تحافظ على عمل عنوان URL للشبكة الرئيسية الخاصة بك بحيث يمكن تحديث أي دعم dapp لجهة خارجية. - -### بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ - -بعد فترة النقل البالغة 20 دقيقة ، ستحتاج إلى تأكيد النقل لإكمال النقل ، أداة النقل ستوجهك للقيام بذلك. سيستمر دعم L1 endpoint الخاص بك خلال فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديثه عندما يكون ذلك مناسبًا لك. - -### Will my endpoint experience downtime while re-publishing? - -It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. - -### هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ - -Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. 
In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. - -### هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ - -إذا اخترت إشارة الترحيل التلقائي auto-migrating ، فسيتم نقل 100٪ من التنسيق مع الرسم البياني الفرعي الخاص بك إلى Arbitrum One. سيتم تحويل كل إشارة التنسيق الخاصة بالرسم الفرعي إلى GRT في وقت النقل ، وسيتم استخدام GRT المقابل لإشارة التنسيق الخاصة بك لصك الإشارة على L2 subgraph. - -يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون أجزاء من GRT ، أو ينقلونه أيضًا إلى L2 لإنتاج إشارة على نفس الرسم البياني الفرعي. - -### هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ - -بمجرد النقل ، سيتم إهمال إصدار شبكة Ethereum mainnet للرسم البياني الفرعي الخاص بك. إذا كنت ترغب في العودة إلى mainnet ، فستحتاج إلى إعادة النشر (redeploy) والنشر مرة أخرى على mainnet. ومع ذلك ، لا يُنصح بشدة بالتحويل مرة أخرى إلى شبكة Ethereum mainnet حيث سيتم في النهاية توزيع مكافآت الفهرسة بالكامل على Arbitrum One. - -### لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ - -يتم دفع رسوم الغاز في Arbitrum One باستخدام ETHbridged ETH (ETH الذي تم ربطه بـ Arbitrum One). ومع ذلك ، فإن رسوم الغاز أقل بكثير عند مقارنتها بشبكة Ethereum mainnet. - -## Delegation(التفويض) - -### كيف أنقل تفويضي؟ - - - -لنقل تفويضك ، ستحتاج إلى إكمال الخطوات التالية: - -1. ابدأ نقل التفويض على شبكة Ethereum mainnet -2. انتظر 20 دقيقة للتأكيد -3. قم بتأكيد نقل التفويض على Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). 
- -### ماذا يحدث لمكافآتي إذا بدأت عملية تحويل وكان لا يزال التخصيص مفتوحا على Ethereum mainnet؟ - -If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. - -### ماذا يحدث إذا كان المفهرس الذي أفوضه حاليًا غير موجود في Arbitrum One؟ - -لن يتم تمكين أداة النقل L2 إلا إذا قام المفهرس الذي فوضته بتحويل حصته إلى Arbitrum. - -### هل يملك المفوضين خيارا للتفويض إلى مفهرس آخر؟ - -If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. - -### ماذا لو لم أتمكن من العثور على المفهرس الذي قمت بالتوفيض إليه في L2؟ - -ستكتشف أداة النقل L2 المفهرس الذي قمت بالتفويض إليه مسبقًا تلقائيًا. - -### هل سأكون قادرًا على المزج والمطابقة أو "نشر" تفويضي عبر مفهرس جديد أو مفهرسين جدد بدلاً من المفهرس السابق؟ - -ستعمل أداة نقل L2 دائمًا على نقل التفويض إلى نفس المفهرس الذي فوضته سابقًا. وبمجرد الانتقال إلى L2 ، يمكنك إلغاء التفويض وانتظار فترة الذوبان ، وتحديد ما إذا كنت ترغب في تقسيم التفويض الخاص بك. - -### هل أنا خاضع لفترة الانتظار أم يمكنني السحب فورًا بعد استخدام أداة نقل التفويض L2؟ - -تتيح لك أداة النقل الانتقال على الفور إلى L2. إذا كنت ترغب في إلغاء التفويض ، فسيتعين عليك انتظار فترة الذوبان. ومع ذلك ، إذا قام المفهرس بتحويل جميع حصته إلى L2 ، فيمكنك السحب على شبكة Ethereum mainnet فورا. - -### هل يمكن أن تتأثر مكافآتي سلبًا إذا لم أحول تفويضي؟ - -من المتوقع أن تنتقل جميع مشاركات الشبكة إلى Arbitrum One في المستقبل. 
- -### كم من الوقت يستغرق استكمال نقل تفويضي إلى L2؟ - -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? - -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? - -No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. - -If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. 
- -### هل هناك ضريبة على التفويض؟ - -لا ، يتم تفويض التوكن المستلمة على L2 إلى المفهرس المحدد نيابة عن المفوض المحدد دون فرض ضريبة التفويض. - -### Will my unrealized rewards be transferred when I transfer my delegation? - -​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. - -At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ - -### Is moving delegations to L2 mandatory? Is there a deadline? - -​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ - -### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? - -​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. - -Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ - -### I don't see a button to transfer my delegation. Why is that? - -​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. 
- -If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ - -### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? - -​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ - -### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? - -​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. - -The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. - -## Curation Signal(إشارة التنسيق) - -### كيف أنقل إشارة التنسيق الخاص بي؟ - -لنقل التنسيق الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: - -1. ابدأ نقل الإشارة على شبكة Ethereum mainnet - -2. حدد عنوان L2 للمنسق \ \* - -3. انتظر 20 دقيقة للتأكيد - -\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. - -### كيف سأعرف ما إذا كان الرسم البياني الفرعي الذي قمت بعمل إشارة تنسيق عليه قد انتقل إلى L2؟ - -عند عرض صفحة تفاصيل الرسم البياني الفرعي ، ستعلمك لافتة بأنه تم نقل هذا الرسم البياني الفرعي. يمكنك اتباع التعليمات لنقل إشارة التنسيق الخاص بك. يمكنك أيضًا العثور على هذه المعلومات في صفحة تفاصيل الرسم البياني الفرعي لأي رسم بياني فرعي تم نقله. - -### ماذا لو كنت لا أرغب في نقل إشارة التنسيق الخاص بي إلى L2؟ - -عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. - -### كيف أعرف أنه تم نقل إشارة التنسيق بنجاح؟ - -يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. 
- -### هل يمكنني نقل إشاة التنسيق الخاص بي على أكثر من رسم بياني فرعي في وقت واحد؟ - -لا يوجد خيار كهذا حالياً. - -## Indexer Stake(حصة المفهرس) - -### كيف يمكنني تحويل حصتي إلى Arbitrum؟ - -> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. - - - -لتحويل حصتك ، ستحتاج إلى إكمال الخطوات التالية: - -1. ابدأ تحويل الحصص على شبكة Ethereum mainnet - -2. انتظر 20 دقيقة للتأكيد - -3. Confirm stake transfer on Arbitrum - -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -### هل سيتم تحويل حصتي بالكامل؟ - -يمكنك اختيار مقدار حصتك المراد تحويلها. إذا اخترت تحويل حصتك بالكامل مرة واحدة ، فستحتاج إلى إغلاق أي تخصيصات مفتوحة أولاً. - -إذا كنت تخطط لنقل أجزاء من حصتك في معاملات متعددة ، فيجب عليك دائمًا تحديد نفس عنوان المستفيد. - -ملاحظة: يجب أن تفي بالحد الأدنى من متطلبات الحصة على L2 في المرة الأولى التي تستخدم فيها أداة التحويل. يجب أن يرسل المفهرسون 100 ألف GRT كحد أدنى (عند استدعاء هذه الوظيفة في المرة الأولى). في حالة ترك جزء من الحصة على L1 ، يجب أن يكون أيضًا أكثر من 100 ألف GRT كحد أدنى وأن يكون كافيًا (جنبًا إلى جنب مع التفويضات) لتغطية مخصصاتك المفتوحة. - -### كم من الوقت لدي لتأكيد تحويل حصتي إلى Arbitrum؟ - -\ _ \ _ \ \* يجب تأكيد معاملتك لإتمام تحويل الحصة على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقدان الحصة. - -### ماذا لو كان لدي تخصيصات مفتوحة؟ - -إذا كنت لا ترسل كل حصصك، فإن أداة نقل L2 ستتحقق من أن الحد الأدنى 100 ألف GRT لا يزال في شبكة Ethereum mainnet وأن حصتك المتبقية وتفويضك كافيان لتغطية أي تخصيصات مفتوحة. قد تحتاج إلى إغلاق التخصيصات المفتوحة إذا كان رصيد GRT الخاص بك لا يغطي الحد الأدنى + المخصصات المفتوحة. 
- -### باستخدام أدوات النقل ، هل من الضروري الانتظار 28 يومًا لإلغاء الحصة في Ethereum mainnet قبل التحويل؟ - -لا ، يمكنك تحويل حصتك إلى L2 على الفور ، ولا داعي لإلغاء حصتك والانتظار قبل استخدام أداة التحويل. لا يسري الانتظار لمدة 28 يومًا إلا إذا كنت ترغب في سحب الحصة إلى محفظتك ، على شبكة Ethereum mainnet أو L2. - -### كم من الوقت سيستغرق تحويل حصتي؟ - -ستستغرق أداة النقل L2 حوالي 20 دقيقة لإكمال تحويل حصتك. - -### هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ - -يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. - -### هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ - -لا ، لكي يقوم المفوضون بنقل GRT المفوضة إلى Arbitrum ، يجب أن يكون المفهرس الذي يتم التفويض إليه نشطًا في L2. - -### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? - -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? - -​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ - -### Can I transfer my stake to L2 if I am in the process of unstaking GRT? - -​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. 
- -## نقل Vesting Contract(عقد الاستحقاق) - -### كيف أنقل عقد الاستحقاق الخاص بي؟ - -لتحويل استحقاقك ، ستحتاج إلى إكمال الخطوات التالية: - -1. ابدأ تحويل الاستحقاق على شبكة Ethereum mainnet - -2. انتظر 20 دقيقة للتأكيد - -3. قم بالتأكيد على نقل الاستحقاق على Arbitrum - -### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا جزئيًا فقط؟ - - - -1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) - -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. - -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. - -4. سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) - -### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا بالكامل؟ - - - -بالنسبة لمن يتمتعون بكامل الصلاحيات ، فإن العملية مماثلة: - -1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) - -2. قم بتعيين عنوان L2 الخاص بك من خلال استدعاء عقد أداة النقل - -3. أرسل حصتك / تفويضك إلى L2 من خلال وظائف أداة التحويل "locked" في L1 Staking contract. - -4. سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) - -### هل يمكنني نقل عقد الاستحقاق الخاص بي إلى Arbitrum؟ - -يمكنك تحويل رصيد GRT الخاص بعقد الاستحقاق الخاص بك إلى عقد استحقاق آخر على L2. هذا شرط أساسي لنقل الحصة أو التفويض من عقد الاستحقاق الخاص بك إلى L2. يجب أن يحتوي عقد الاستحقاق على مبلغ غير صفري من GRT (يمكنك تحويل مبلغ صغير إليه مثل 1 GRT إذا لزم الأمر). - -عندما تقوم بتحويل GRT من عقد الاستحقاق L1 الخاص بك إلى L2 ، يمكنك اختيار المبلغ الذي تريد إرساله ويمكنك القيام بذلك عدة مرات. سيتم بدء عقد الاستحقاق على L2 في المرة الأولى التي تقوم فيها بتحويل GRT. - -تتم عمليات النقل باستخدام أداة النقل(Transfer Tool) التي ستكون مرئية في ملف تعريف Explorer الخاص بك عند الاتصال بحساب عقد الاستحقاق. 
- -يرجى ملاحظة أنك لن تكون قادرًا على استخلاص/ سحب GRT من عقد الاستحقاق على L2 حتى نهاية الجدول الزمني للاستحقاق عندما يتم تخويل عقدك بالكامل. إذا كنت بحاجة لتحرير GRT قبل ذلك الحين ، فيمكنك إعادة نقل GRT إلى عقد الاستحقاق على L1 باستخدام أداة تحويل أخرى متاحة لهذا الغرض. - -إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. - -### أنا أستخدم عقد الاستحقاق الخاص بي للقيام بالتخزين (staking) في mainnet. هل يمكنني تحويل حصتي إلى Arbitrum؟ - -نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل الحصة بحيث تكون مملوكة لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعض رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك تحويل حصتك إلى أي عنوان على L2 ، ولكن يجب عليك تعيينها مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. - -### أنا أستخدم عقد الاستحقاق الخاص بي للتفويض على mainnet. هل يمكنني نقل تفويضاتي إلى Arbitrum؟ - -نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل التفويض بحيث يكون مملوكًا لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعضا من رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك نقل تفويضك إلى أي عنوان في L2 ، ولكن يجب عليك تعيينه مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. - -### هل يمكنني تحديد مستفيد مختلف لعقد الاستحقاق الخاص بي على L2؟ - -نعم ، في المرة الأولى التي تقوم فيها بتحويل رصيد وإعداد عقد استحقاق L2 ، يمكنك تحديد مستفيد من L2. تأكد من أن هذا المستفيد عبارة عن محفظة يمكنها إجراء المعاملات على Arbitrum One ، يجب أن تكون EOA أو multisig تم نشرها على Arbitrum One. - -إذا كان عقدك مخولًا بالكامل ، فلن تقوم بإعداد عقد استحقاق على L2 ؛ بدلاً من ذلك ، ستقوم بتعيين عنوان محفظة L2 وستكون هذه هي المحفظة المستلمة لحصتك أو تفويضك في Arbitrum. - -### عقدي مخول بالكامل. 
هل يمكنني نقل حصتي أو تفويضي إلى عنوان آخر ليس عقداً استحقاقيا على L2؟ - -نعم. إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. - -هذا يسمح لك بتحويل حصتك أو تفويضك إلى أي عنوان L2. - -### عقد الاستحقاق الخاص بي لا يزال مستحقًا. كيف أقوم بتحويل رصيد عقد الاستحقاق الخاص بي إلى L2؟ - -تنطبق هذه الخطوات فقط إذا كان عقدك لا يزال مستحقًا ، أو إذا كنت قد استخدمت هذه العملية من قبل عندما كان عقدك لا يزال مستحقًا. - -لتحويل عقد الاستحقاق الخاص بك إلى L2 ، سوف ترسل أي رصيد GRT إلى L2 باستخدام أدوات التحويل ، والتي ستعمل على تهيئة عقد استحقاق L2 الخاص بك: - -1. قم بإيداع بعض ETH في عقد أداة النقل (سيتم استخدام هذا لدفع ثمن غاز L2) - -2. إبطال وصول البروتوكول إلى عقد الاستحقاق (مطلوب للخطوة التالية) - -3. امنح البروتوكول حق الوصول إلى عقد الاستحقاق (سيسمح لعقدك بالتفاعل مع أداة التحويل) - -4. حدد عنوان المستفيد على L2 \ \* وابدأ في تحويل الرصيد على Ethereum mainnet - -5. انتظر 20 دقيقة للتأكيد - -6. قم بتأكيد تحويل الرصيد على L2 - -\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. - -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? - -​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. 
The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. - -If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ - -### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? - -​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. - -When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ - -### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? - -​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. - -You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. - -### هل يمكنني إرجاع عقد الاستحقاق إلى L1؟ - -ليست هناك حاجة للقيام بذلك لأن عقد الاستحقاق الخاص بك لا يزال في L1. عندما تستخدم أدوات التحويل ، فأنت تقوم فقط بإنشاء عقد جديد في L2 مرتبط بعقد الاستحقاق L1 الخاص بك ، ويمكنك إرسال GRT ذهابًا وإيابًا بينهما. - -### لماذا أحتاج إلى تغيير عقد الاستحقاق الخاص بي من البداية؟ - -يجب عليك إعداد عقد استحقاق L2 حتى يتمكن هذا الحساب من امتلاك حصتك أو تفويضك في L2. وإلا ، لن يكون هناك وسيلة لك لنقل الحصة / التفويض إلى L2 دون "الهروب" من عقد الاستحقاق. 
- -### ماذا يحدث إذا حاولت سحب عقدي عندما لم يتم تنفيذه بالكامل؟هل هذا ممكن؟ - -هذا ليس احتمال. يمكنك إعادة الأموال إلى L1 وسحبها هناك. - -### ماذا لو لم أرغب في نقل عقد الاستحقاق الخاص بي إلى L2؟ - -يمكنك الاستمرار في التخزين / التفويض على L1. بمرور الوقت ، قد ترغب في التفكير في الانتقال إلى L2 لتمكين المكافآت هناك حيث يتوسع البروتوكول في Arbitrum. لاحظ أن أدوات التحويل هذه مخصصة لمنح العقود المسموح لها بالمشاركة والتفويض في البروتوكول. إذا كان عقدك لا يسمح بالتخزين أو التفويض ، أو كان قابلاً للإلغاء ، فلا توجد أداة نقل متاحة. ستظل قادرًا على سحب GRT من L1 عندما يكون ذلك متاحًا. diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx deleted file mode 100644 index 33b5b1628783..000000000000 --- a/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: L2 Transfer Tools Guide ---- - -يسهل الغراف الانتقال إلى الطبقة الثانبة على أربترم. لكل مشارك في البروتوكول ، توجد مجموعة من أدوات نقل الطبقة الثانبة لجعل النقل إليها سلسًا لجميع المشاركين في الشبكة. ستطلب منك هذه الأدوات اتباع مجموعة محددة من الخطوات بناءً على ما تقوم بنقله. - -بعض الأسئلة المتكررة حول هذه الأدوات تمت الإجابة عليها في [الأسئلة الشائعة حول أدوات نقل الطبقة الثانية] (/arbitrum/l2-transfer-tools-faq). تحتوي الأسئلة الشائعة على تفسيرات متعمقة لكيفية استخدام الأدوات وكيفية عملها والأمور التي يجب وضعها في الاعتبار عند إستخدامها. - -## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) - - - -## فوائد نقل الغراف الفرعي الخاصة بك - -مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون] (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. وتعتبر آربترم سلسلة كتل من الطبقة الثانية أو "L2"، حيث ترث الأمان من سلسلة الإيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ كبير. - -عندما تقوم بنشر أو ترقية الغرافات الفرعية الخاصة بك إلى شبكة الغراف، فأنت تتفاعل مع عقودٍ ذكيةٍ في البروتوكول وهذا يتطلب دفع رسوم الغاز باستخدام عملة الايثيريوم. 
من خلال نقل غرافاتك الفرعية إلى آربترم، فإن أي ترقيات مستقبلية لغرافك الفرعي ستتطلب رسوم غازٍ أقل بكثير. الرسوم الأقل، وكذلك حقيقة أن منحنيات الترابط التنسيقي على الطبقة الثانية مستقيمة، تجعل من الأسهل على المنسِّقين الآخرين تنسيق غرافك الفرعي، ممّا يزيد من مكافآت المفهرِسين على غرافك الفرعي. هذه البيئة ذات التكلفة-الأقل كذلك تجعل من الأرخص على المفهرسين أن يقوموا بفهرسة وخدمة غرافك الفرعي. سوف تزداد مكافآت الفهرسة على آربترم وتتناقص على شبكة إيثيريوم الرئيسية على مدى الأشهر المقبلة، لذلك سيقوم المزيد والمزيد من المُفَهرِسين بنقل ودائعهم المربوطة وتثبيت عملياتهم على الطبقة الثانية. - -## فهم ما يحدث مع الإشارة وغرافك الفرعي على الطبقة الأولى وعناوين مواقع الإستعلام - -عند نقل سبجراف إلى Arbitrum، يتم استخدام جسر Arbitrum GRT، الذي بدوره يستخدم جسر Arbitrum الأصلي، لإرسال السبجراف إلى L2. سيؤدي عملية "النقل" إلى إهمال السبجراف على شبكة الإيثيريوم الرئيسية وإرسال المعلومات لإعادة إنشاء السبجراف على L2 باستخدام الجسر. ستتضمن أيضًا رصيد GRT المرهون المرتبط بمالك السبجراف، والذي يجب أن يكون أكبر من الصفر حتى يقبل الجسر النقل. - -عندما تختار نقل الرسم البياني الفرعي ، سيؤدي ذلك إلى تحويل جميع إشارات التنسيق الخاصة بالرسم الفرعي إلى GRT. هذا يعادل "إهمال" الرسم البياني الفرعي على الشبكة الرئيسية. سيتم إرسال GRT المستخدمة لعملية التنسيق الخاصة بك إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي ، حيث سيتم استخدامها لإنتاج الإشارة نيابة عنك. - -يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون جزء من GRT الخاص بهم ، أو نقله أيضًا إلى L2 لصك إشارة على نفس الرسم البياني الفرعي. إذا لم يقم مالك الرسم البياني الفرعي بنقل الرسم البياني الفرعي الخاص به إلى L2 وقام بإيقافه يدويًا عبر استدعاء العقد ، فسيتم إخطار المنسقين وسيتمكنون من سحب تنسيقهم. - -بمجرد نقل الرسم البياني الفرعي ، لن يتلقى المفهرسون بعد الآن مكافآت لفهرسة الرسم البياني الفرعي، نظرًا لأنه يتم تحويل كل التنسيق لـ GRT. ومع ذلك ، سيكون هناك مفهرسون 1) سيستمرون في خدمة الرسوم البيانية الفرعية المنقولة لمدة 24 ساعة ، و 2) سيبدأون فورًا في فهرسة الرسم البياني الفرعي على L2. 
ونظرًا لأن هؤلاء المفهرسون لديهم بالفعل رسم بياني فرعي مفهرس ، فلا داعي لانتظار مزامنة الرسم البياني الفرعي ، وسيكون من الممكن الاستعلام عن الرسم البياني الفرعي على L2 مباشرة تقريبًا. - -يجب إجراء الاستعلامات على الرسم البياني الفرعي في L2 على عنوان URL مختلف (على \`` Arbitrum-gateway.thegraph.com`) ، لكن عنوان URL L1 سيستمر في العمل لمدة 48 ساعة على الأقل. بعد ذلك ، ستقوم بوابة L1 بإعادة توجيه الاستعلامات إلى بوابة L2 (لبعض الوقت) ، ولكن هذا سيضيف زمن تأخير لذلك يوصى تغيير جميع استعلاماتك إلى عنوان URL الجديد في أقرب وقت ممكن. - -## اختيار محفظة L2 الخاصة بك - -عندما قمت بنشر subgraph الخاص بك على الشبكة الرئيسية ، فقد استخدمت محفظة متصلة لإنشاء subgraph ، وتمتلك هذه المحفظة NFT الذي يمثل هذا subgraph ويسمح لك بنشر التحديثات. - -عند نقل الرسم البياني الفرعي إلى Arbitrum ، يمكنك اختيار محفظة مختلفة والتي ستمتلك هذا الـ subgraph NFT على L2. - -إذا كنت تستخدم محفظة "عادية" مثل MetaMask (حساب مملوك خارجيًا EOA ، محفظة ليست بعقد ذكي) ، فهذا اختياري ويوصى بالاحتفاظ بعنوان المالك نفسه كما في L1. - -إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كمالك للرسم البياني الفرعي الخاص بك على L2. - -** من المهم جدًا استخدام عنوان محفظة تتحكم فيه ، ويمكنه إجراء معاملات على Arbitrum. وإلا فسيتم فقد الرسم البياني الفرعي ولا يمكن استعادته. ** - -## التحضير لعملية النقل: إنشاء جسر لـبعض ETH - -يتضمن نقل الغراف الفرعي إرسال معاملة عبر الجسر ، ثم تنفيذ معاملة أخرى على شبكة أربترم. تستخدم المعاملة الأولى الإيثيريوم على الشبكة الرئيسية ، وتتضمن بعضًا من إيثيريوم لدفع ثمن الغاز عند استلام الرسالة على الطبقة الثانية. ومع ذلك ، إذا كان هذا الغاز غير كافٍ ، فسيتعين عليك إعادة إجراء المعاملة ودفع ثمن الغاز مباشرةً على الطبقة الثانية (هذه هي "الخطوة 3: تأكيد التحويل" أدناه). 
يجب تنفيذ هذه الخطوة ** في غضون 7 أيام من بدء التحويل **. علاوة على ذلك ، سيتم إجراء المعاملة الثانية مباشرة على شبكة أربترم ("الخطوة 4: إنهاء التحويل على الطبقة الثانية"). لهذه الأسباب ، ستحتاج بعضًا من إيثيريوم في محفظة أربترم. إذا كنت تستخدم متعدد التواقيع أو عقداً ذكياً ، فيجب أن يكون هناك بعضًا من إيثيريوم في المحفظة العادية (حساب مملوك خارجيا) التي تستخدمها لتنفيذ المعاملات ، وليس على محفظة متعددة التواقيع. - -يمكنك شراء إيثيريوم من بعض المنصات وسحبها مباشرة إلى أربترم، أو يمكنك استخدام جسر أربترم لإرسال إيثيريوم من محفظة الشبكة الرئيسيةإلى الطبقة الثانية: [bridge.arbitrum.io] (http://bridge.arbitrum.io). نظرًا لأن رسوم الغاز على أربترم أقل ، فستحتاج فقط إلى مبلغ صغير. من المستحسن أن تبدأ بمبلغ منخفض (0 على سبيل المثال ، 01 ETH) للموافقة على معاملتك. - -## العثور على أداة نقل الغراف الفرعي - -يمكنك العثور على أداة نقل L2 في صفحة الرسم البياني الفرعي الخاص بك على Subgraph Studio: - -![أداة النقل](/img/L2-transfer-tool1.png) - -إذا كنت متصلاً بالمحفظة التي تمتلك الغراف الفرعي، فيمكنك الوصول إليها عبر المستكشف، وذلك عن طريق الانتقال إلى صفحة الغراف الفرعي على المستكشف: - -![Transferring to L2](/img/transferToL2.png) - -سيؤدي النقر فوق زر النقل إلى الطبقة الثانية إلى فتح أداة النقل حيث يمكنك بدء عملية النقل. - -## الخطوة 1: بدء عملية النقل - -قبل بدء عملية النقل، يجب أن تقرر أي عنوان سيكون مالكًا للغراف الفرعي على الطبقة الثانية (انظر "اختيار محفظة الطبقة الثانية" أعلاه)، ويُوصَى بشدة بأن يكون لديك بعضًا من الإيثيريوم لرسوم الغاز على أربترم. يمكنك الاطلاع على (التحضير لعملية النقل: تحويل بعضًا من إيثيريوم عبر الجسر." أعلاه). - -يرجى أيضًا ملاحظة أن نقل الرسم البياني الفرعي يتطلب وجود كمية غير صفرية من إشارة التنسيق عليه بنفس الحساب الذي يمتلك الرسم البياني الفرعي ؛ إذا لم تكن قد أشرت إلى الرسم البياني الفرعي ، فسيتعين عليك إضافة القليل من إشارة التنسيق (يكفي إضافة مبلغ صغير مثل 1 GRT). - -بعد فتح أداة النقل، ستتمكن من إدخال عنوان المحفظة في الطبقة الثانية في حقل "عنوان محفظة الاستلام". تأكد من إدخال العنوان الصحيح هنا. 
بعد ذلك، انقر على "نقل الغراف الفرعي"، وسيتم طلب تنفيذ العملية في محفظتك. (يُرجى ملاحظة أنه يتم تضمين بعضًا من الإثيريوم لدفع رسوم الغاز في الطبقة الثانية). بعد تنفيذ العملية، سيتم بدء عملية النقل وإهمال الغراف الفرعي في الطبقة الأولى. (يمكنك الاطلاع على "فهم ما يحدث مع الإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام" أعلاه لمزيد من التفاصيل حول ما يحدث خلف الكواليس). - -إذا قمت بتنفيذ هذه الخطوة، \*\*يجب عليك التأكد من أنك ستستكمل الخطوة 3 في غضون 7 أيام، وإلا فإنك ستفقد الغراف الفرعي والإشارة GRT الخاصة بك. يرجع ذلك إلى آلية التواصل بين الطبقة الأولى والطبقة الثانية في أربترم: الرسائل التي ترسل عبر الجسر هي "تذاكر قابلة لإعادة المحاولة" يجب تنفيذها في غضون 7 أيام، وقد يتطلب التنفيذ الأولي إعادة المحاولة إذا كان هناك زيادة في سعر الغاز على أربترم. - -![Start the transfer to L2](/img/startTransferL2.png) - -## الخطوة 2: الانتظار حتى يتم نقل الغراف الفرعي إلى الطبقة الثانية - -بعد بدء عملية النقل، يتعين على الرسالة التي ترسل الـ subgraph من L1 إلى L2 أن يتم نشرها عبر جسر Arbitrum. يستغرق ذلك حوالي 20 دقيقة (ينتظر الجسر لكتلة الشبكة الرئيسية التي تحتوي على المعاملة حتى يتأكد أنها "آمنة" من إمكانية إعادة ترتيب السلسلة). - -بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. - -![شاشة انتظار](/img/screenshotOfWaitScreenL2.png) - -## الخطوة الثالثة: تأكيد التحويل - -في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز الطبقة الثانية المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى الغراف الفرعي في عقود أربترم. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على أربترم إلى فشل هذا التنفيذ التلقائي. وفي هذه الحالة ، ستكون "التذكرة" التي ترسل غرافك الفرعي إلى الطبقة الثانية معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. - -في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة الطبقة الثانية والتي تحتوي بعضاً من إيثيريوم على أربترم، قم بتغيير شبكة محفظتك إلى أربترم، والنقر فوق "تأكيد النقل" لإعادة محاولة المعاملة. 
- -![تأكيد النقل إلى الطبقة الثانية](/img/confirmTransferToL2.png) - -## الخطوة 4: إنهاء عملية النقل على L2 - -في هذه المرحلة، تم استلام الغراف الفرعي والـ GRT الخاص بك على أربترم، ولكن الغراف الفرعي لم يتم نشره بعد. ستحتاج إلى الربط باستخدام محفظة الطبقة الثانية التي اخترتها كمحفظة استلام، وتغيير شبكة محفظتك إلى أربترم، ثم النقر على "نشر الغراف الفرعي" - -![نشر الغراف الفرعي](/img/publishSubgraphL2TransferTools.png) - -![انتظر حتى يتم نشر الغراف الفرعي](/img/waitForSubgraphToPublishL2TransferTools.png) - -سيؤدي هذا إلى نشر الغراف الفرعي حتى يتمكن المفهرسون الذين يعملون في أربترم بالبدء في تقديم الخدمة. كما أنه سيعمل أيضًا على إصدار إشارة التنسيق باستخدام GRT التي تم نقلها من الطبقة الأولى. - -## Step 5: Updating the query URL - -تم نقل غرافك الفرعي بنجاح إلى أربترم! للاستعلام عن الغراف الفرعي ، سيكون عنوان URL الجديد هو: - -`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` - -لاحظ أن ID الغراف الفرعي على أربترم سيكون مختلفًا عن الذي لديك في الشبكة الرئيسية، ولكن يمكنك العثور عليه في المستكشف أو استوديو. كما هو مذكور أعلاه (راجع "فهم ما يحدث للإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام") سيتم دعم عنوان URL الطبقة الأولى القديم لفترة قصيرة ، ولكن يجب عليك تبديل استعلاماتك إلى العنوان الجديد بمجرد مزامنة الغراف الفرعي على الطبقة الثانية. - -## كيفية نقل التنسيق الخاص بك إلى أربترم (الطبقة الثانية) - -## Understanding what happens to curation on subgraph transfers to L2 - -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. - -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. 
When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. - -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. - -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. - -## اختيار محفظة L2 الخاصة بك - -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. - -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. - -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. 
- -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** - -## Sending curation to L2: Step 1 - -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. - -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. - -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. - -![Transfer signal](/img/transferSignalL2TransferTools.png) - -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. 
- -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. - -## Sending curation to L2: step 2 - -Starting the transfer: - -![Send signal to L2](/img/sendingCurationToL2Step2First.png) - -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). - -بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. - -![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) - -## Sending curation to L2: step 3 - -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. - -في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة الطبقة الثانية والتي تحتوي بعضاً من إيثيريوم على أربترم، قم بتغيير شبكة محفظتك إلى أربترم، والنقر فوق "تأكيد النقل" لإعادة محاولة المعاملة. - -![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) - -## Withdrawing your curation on L1 - -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. 
diff --git a/website/pages/ar/archived/_meta.js b/website/pages/ar/archived/_meta.js new file mode 100644 index 000000000000..d1341196f7d2 --- /dev/null +++ b/website/pages/ar/archived/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/archived/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/archived/arbitrum/_meta.js b/website/pages/ar/archived/arbitrum/_meta.js new file mode 100644 index 000000000000..944ca24a1f62 --- /dev/null +++ b/website/pages/ar/archived/arbitrum/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/archived/arbitrum/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/archived/arbitrum/arbitrum-faq.mdx b/website/pages/ar/archived/arbitrum/arbitrum-faq.mdx new file mode 100644 index 000000000000..ea4e5f0b2872 --- /dev/null +++ b/website/pages/ar/archived/arbitrum/arbitrum-faq.mdx @@ -0,0 +1,80 @@ +--- +title: الأسئلة الشائعة حول Arbitrum +--- + +Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. + +## Why did The Graph implement an L2 Solution? + +By scaling The Graph on L2, network participants can now benefit from: + +- Upwards of 26x savings on gas fees + +- سرعة أكبر في المعاملات + +- Security inherited from Ethereum + +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers can open and close allocations more frequently to index a greater number of subgraphs. Developers can deploy and update subgraphs more easily, and Delegators can delegate GRT more frequently. Curators can add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. + +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. 
+ +## ما الذي يجب علي فعله لاستخدام The Graph في L2؟ + +The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. + +Consequently, to pay for queries, you need GRT on Arbitrum. Here are a few different ways to achieve this: + +- If you already have GRT on Ethereum, you can bridge it to Arbitrum. You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: + + - [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) + - [TransferTo](https://transferto.xyz/swap) + +- If you have other assets on Arbitrum, you can swap them for GRT through a swapping protocol like Uniswap. + +- Alternatively, you can acquire GRT directly on Arbitrum through a decentralized exchange. + +Once you have GRT on Arbitrum, you can add it to your billing balance. + +للاستفادة من استخدام The Graph على L2 ، استخدم القائمة المنسدلة للتبديل بين الشبكات. + +![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) + +## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? + +Network participants must move to Arbitrum to continue participating in The Graph Network. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) for additional support. + +All indexing rewards are now entirely on Arbitrum. + +## Were there any risks associated with scaling the network to L2? + +All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). + +Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. 
Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). + +## Are existing subgraphs on Ethereum working? + +All subgraphs are now on Arbitrum. Please refer to [L2 Transfer Tool Guide](/archived/arbitrum/l2-transfer-tools-guide/) to ensure your subgraphs operate seamlessly. + +## Does GRT have a new smart contract deployed on Arbitrum? + +Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. + +## الأسئلة الشائعة حول إعداد الفواتير في Arbitrum + +## ما الذي علي فعله بشأن ال GRT في حساب الفوترة الخاص بي ؟ + +لا شئ! لقد تم نقل GRT الخاصة بك بشكل آمن إلى Arbitrum ويتم استخدامها للدفع مقابل الاستعلامات. + +## كيف أعرف أن أموالي قد انتقلت بشكل آمن إلى Arbitrum؟ + +All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). + +## كيف أعرف أن جسر Arbitrum آمن؟ + +The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. + +## ماذا علي أن أفعل إذا قمت بإضافة GRT جديد من محفظة Ethereum mainnet الخاصة بي؟ + +Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. + +Visit the [Billing page](/subgraphs/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. 
diff --git a/website/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx new file mode 100644 index 000000000000..9c1e9fa8db64 --- /dev/null +++ b/website/pages/ar/archived/arbitrum/l2-transfer-tools-faq.mdx @@ -0,0 +1,411 @@ +--- +title: الأسئلة الشائعة حول أدوات النقل L2 +--- + +## عام + +### ما هي أدوات النقل L2؟ + +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. + +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ + +إذا كنت تستخدم محفظة [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) ، فيمكنك استخدام نفس العنوان. إذا كانت محفظة Ethereum mainnet الخاصة بك عبارة عن عقد (مثل multisig) ، فيجب عليك تحديد [Arbitrum wallet address](/archived/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) حيث سيتم إرسال التحويل الخاص بك. يرجى التحقق من العنوان بعناية لأن أي تحويلات إلى عنوان غير صحيح يمكن أن تؤدي إلى خسارة غير قابلة للرجوع. إذا كنت ترغب في استخدام multisig على L2 ، فتأكد من نشر عقد multisig على Arbitrum One. + +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. 
+ +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). + +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. 
The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there to help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## نقل الـ Subgraph (الرسم البياني الفرعي) + +### كيف أقوم بتحويل الـ subgraph الخاص بي؟ + + + +لنقل الـ subgraph الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ التحويل على شبكة Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. قم بتأكيد نقل الـ subgraph على Arbitrum \* + +4. قم بإنهاء نشر الـ subgraph على Arbitrum + +5. حدّث عنوان URL للاستعلام (مستحسن) + +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### من أين يجب أن أبدأ التحويل ؟ + +يمكنك بدء عملية النقل من [Subgraph Studio](https://thegraph.com/studio/) ، [Explorer](https://thegraph.com/explorer) ، أو من أي صفحة تفاصيل subgraph. انقر فوق الزر "Transfer Subgraph" في صفحة تفاصيل الرسم الـ subgraph لبدء النقل. + +### كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي + +يستغرق وقت النقل حوالي 20 دقيقة. يعمل جسر Arbitrum في الخلفية لإكمال نقل الجسر تلقائيًا. في بعض الحالات ، قد ترتفع تكاليف الغاز وستحتاج إلى تأكيد المعاملة مرة أخرى. + +### هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ + +سيكون الـ subgraph الخاص بك قابلاً للاكتشاف على الشبكة التي تم نشرها عليها فقط. على سبيل المثال ، إذا كان الـ subgraph الخاص بك موجودًا على Arbitrum One ، فيمكنك العثور عليه فقط في Explorer على Arbitrum One ولن تتمكن من العثور عليه على Ethereum. يرجى التأكد من تحديد Arbitrum One في مبدل الشبكة في أعلى الصفحة للتأكد من أنك على الشبكة الصحيحة. بعد النقل ، سيظهر الـ L1 subgraph على أنه مهمل. + +### هل يلزم نشر الـ subgraph الخاص بي لنقله؟ + +للاستفادة من أداة نقل الـ subgraph ، يجب أن يكون الرسم البياني الفرعي الخاص بك قد تم نشره بالفعل على شبكة Ethereum الرئيسية ويجب أن يكون لديه إشارة تنسيق مملوكة للمحفظة التي تمتلك الرسم البياني الفرعي. إذا لم يتم نشر الرسم البياني الفرعي الخاص بك ، فمن المستحسن أن تقوم ببساطة بالنشر مباشرة على Arbitrum One - ستكون رسوم الغاز أقل بكثير. إذا كنت تريد نقل رسم بياني فرعي منشور ولكن حساب المالك لا يملك إشارة تنسيق عليه ، فيمكنك الإشارة بمبلغ صغير (على سبيل المثال 1 GRT) من ذلك الحساب ؛ تأكد من اختيار إشارة "auto-migrating". + +### ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد النقل إلى Arbitrum؟ + +بعد نقل الرسم البياني الفرعي الخاص بك إلى Arbitrum ، سيتم إهمال إصدار Ethereum mainnet. نوصي بتحديث عنوان URL للاستعلام في غضون 48 ساعة. 
ومع ذلك ، هناك فترة سماح تحافظ على عمل عنوان URL للشبكة الرئيسية الخاصة بك بحيث يمكن تحديث أي دعم dapp لجهة خارجية. + +### بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ + +بعد فترة النقل البالغة 20 دقيقة ، ستحتاج إلى تأكيد النقل لإكمال النقل ، أداة النقل ستوجهك للقيام بذلك. سيستمر دعم L1 endpoint الخاص بك خلال فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديثه عندما يكون ذلك مناسبًا لك. + +### Will my endpoint experience downtime while re-publishing? + +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. + +### هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ + +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. + +### هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ + +إذا اخترت إشارة الترحيل التلقائي auto-migrating ، فسيتم نقل 100٪ من التنسيق مع الرسم البياني الفرعي الخاص بك إلى Arbitrum One. سيتم تحويل كل إشارة التنسيق الخاصة بالرسم الفرعي إلى GRT في وقت النقل ، وسيتم استخدام GRT المقابل لإشارة التنسيق الخاصة بك لصك الإشارة على L2 subgraph. + +يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون أجزاء من GRT ، أو ينقلونه أيضًا إلى L2 لإنتاج إشارة على نفس الرسم البياني الفرعي. + +### هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ + +بمجرد النقل ، سيتم إهمال إصدار شبكة Ethereum mainnet للرسم البياني الفرعي الخاص بك. إذا كنت ترغب في العودة إلى mainnet ، فستحتاج إلى إعادة النشر (redeploy) والنشر مرة أخرى على mainnet. ومع ذلك ، لا يُنصح بشدة بالتحويل مرة أخرى إلى شبكة Ethereum mainnet حيث سيتم في النهاية توزيع مكافآت الفهرسة بالكامل على Arbitrum One. 
+ +### لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ + +يتم دفع رسوم الغاز في Arbitrum One باستخدام bridged ETH (ETH الذي تم ربطه بـ Arbitrum One). ومع ذلك ، فإن رسوم الغاز أقل بكثير عند مقارنتها بشبكة Ethereum mainnet. + +## Delegation (التفويض) + +### كيف أنقل تفويضي؟ + + + +لنقل تفويضك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ نقل التفويض على شبكة Ethereum mainnet +2. انتظر 20 دقيقة للتأكيد +3. قم بتأكيد نقل التفويض على Arbitrum + +\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum.\*\* This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### ماذا يحدث لمكافآتي إذا بدأت عملية تحويل وكان لا يزال التخصيص مفتوحا على Ethereum mainnet؟ + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### ماذا يحدث إذا كان المفهرس الذي أفوضه حاليًا غير موجود في Arbitrum One؟ + +لن يتم تمكين أداة النقل L2 إلا إذا قام المفهرس الذي فوضته بتحويل حصته إلى Arbitrum. + +### هل يملك المفوضون خيارًا للتفويض إلى مفهرس آخر؟ + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. 
+ +### ماذا لو لم أتمكن من العثور على المفهرس الذي قمت بالتفويض إليه في L2؟ + +ستكتشف أداة النقل L2 المفهرس الذي قمت بالتفويض إليه مسبقًا تلقائيًا. + +### هل سأكون قادرًا على المزج والمطابقة أو "نشر" تفويضي عبر مفهرس جديد أو مفهرسين جدد بدلاً من المفهرس السابق؟ + +ستعمل أداة نقل L2 دائمًا على نقل التفويض إلى نفس المفهرس الذي فوضته سابقًا. وبمجرد الانتقال إلى L2 ، يمكنك إلغاء التفويض وانتظار فترة الذوبان ، وتحديد ما إذا كنت ترغب في تقسيم التفويض الخاص بك. + +### هل أنا خاضع لفترة الانتظار أم يمكنني السحب فورًا بعد استخدام أداة نقل التفويض L2؟ + +تتيح لك أداة النقل الانتقال على الفور إلى L2. إذا كنت ترغب في إلغاء التفويض ، فسيتعين عليك انتظار فترة الذوبان. ومع ذلك ، إذا قام المفهرس بتحويل جميع حصته إلى L2 ، فيمكنك السحب على شبكة Ethereum mainnet فورا. + +### هل يمكن أن تتأثر مكافآتي سلبًا إذا لم أحول تفويضي؟ + +من المتوقع أن تنتقل جميع مشاركات الشبكة إلى Arbitrum One في المستقبل. + +### كم من الوقت يستغرق استكمال نقل تفويضي إلى L2؟ + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. 
The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### هل هناك ضريبة على التفويض؟ + +لا ، يتم تفويض التوكن المستلمة على L2 إلى المفهرس المحدد نيابة عن المفوض المحدد دون فرض ضريبة التفويض. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). 
Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. 
+ +## Curation Signal(إشارة التنسيق) + +### كيف أنقل إشارة التنسيق الخاص بي؟ + +لنقل التنسيق الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ نقل الإشارة على شبكة Ethereum mainnet + +2. حدد عنوان L2 للمنسق \ \* + +3. انتظر 20 دقيقة للتأكيد + +\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. + +### كيف سأعرف ما إذا كان الرسم البياني الفرعي الذي قمت بعمل إشارة تنسيق عليه قد انتقل إلى L2؟ + +عند عرض صفحة تفاصيل الرسم البياني الفرعي ، ستعلمك لافتة بأنه تم نقل هذا الرسم البياني الفرعي. يمكنك اتباع التعليمات لنقل إشارة التنسيق الخاص بك. يمكنك أيضًا العثور على هذه المعلومات في صفحة تفاصيل الرسم البياني الفرعي لأي رسم بياني فرعي تم نقله. + +### ماذا لو كنت لا أرغب في نقل إشارة التنسيق الخاص بي إلى L2؟ + +عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. + +### كيف أعرف أنه تم نقل إشارة التنسيق بنجاح؟ + +يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. + +### هل يمكنني نقل إشاة التنسيق الخاص بي على أكثر من رسم بياني فرعي في وقت واحد؟ + +لا يوجد خيار كهذا حالياً. + +## Indexer Stake(حصة المفهرس) + +### كيف يمكنني تحويل حصتي إلى Arbitrum؟ + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + + +لتحويل حصتك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ تحويل الحصص على شبكة Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. Confirm stake transfer on Arbitrum + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). 
+ +### هل سيتم تحويل حصتي بالكامل؟ + +يمكنك اختيار مقدار حصتك المراد تحويلها. إذا اخترت تحويل حصتك بالكامل مرة واحدة ، فستحتاج إلى إغلاق أي تخصيصات مفتوحة أولاً. + +إذا كنت تخطط لنقل أجزاء من حصتك في معاملات متعددة ، فيجب عليك دائمًا تحديد نفس عنوان المستفيد. + +ملاحظة: يجب أن تفي بالحد الأدنى من متطلبات الحصة على L2 في المرة الأولى التي تستخدم فيها أداة التحويل. يجب أن يرسل المفهرسون 100 ألف GRT كحد أدنى (عند استدعاء هذه الوظيفة في المرة الأولى). في حالة ترك جزء من الحصة على L1 ، يجب أن يكون أيضًا أكثر من 100 ألف GRT كحد أدنى وأن يكون كافيًا (جنبًا إلى جنب مع التفويضات) لتغطية مخصصاتك المفتوحة. + +### كم من الوقت لدي لتأكيد تحويل حصتي إلى Arbitrum؟ + +\ _ \ _ \ \* يجب تأكيد معاملتك لإتمام تحويل الحصة على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقدان الحصة. + +### ماذا لو كان لدي تخصيصات مفتوحة؟ + +إذا كنت لا ترسل كل حصصك، فإن أداة نقل L2 ستتحقق من أن الحد الأدنى 100 ألف GRT لا يزال في شبكة Ethereum mainnet وأن حصتك المتبقية وتفويضك كافيان لتغطية أي تخصيصات مفتوحة. قد تحتاج إلى إغلاق التخصيصات المفتوحة إذا كان رصيد GRT الخاص بك لا يغطي الحد الأدنى + المخصصات المفتوحة. + +### باستخدام أدوات النقل ، هل من الضروري الانتظار 28 يومًا لإلغاء الحصة في Ethereum mainnet قبل التحويل؟ + +لا ، يمكنك تحويل حصتك إلى L2 على الفور ، ولا داعي لإلغاء حصتك والانتظار قبل استخدام أداة التحويل. لا يسري الانتظار لمدة 28 يومًا إلا إذا كنت ترغب في سحب الحصة إلى محفظتك ، على شبكة Ethereum mainnet أو L2. + +### كم من الوقت سيستغرق تحويل حصتي؟ + +ستستغرق أداة النقل L2 حوالي 20 دقيقة لإكمال تحويل حصتك. + +### هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ + +يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. + +### هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ + +لا ، لكي يقوم المفوضون بنقل GRT المفوضة إلى Arbitrum ، يجب أن يكون المفهرس الذي يتم التفويض إليه نشطًا في L2. 
+ +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? + +Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? + +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ + +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? + +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. + +## نقل Vesting Contract(عقد الاستحقاق) + +### كيف أنقل عقد الاستحقاق الخاص بي؟ + +لتحويل استحقاقك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ تحويل الاستحقاق على شبكة Ethereum mainnet + +2. انتظر 20 دقيقة للتأكيد + +3. قم بالتأكيد على نقل الاستحقاق على Arbitrum + +### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا جزئيًا فقط؟ + + + +1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) + +2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + +3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. + +4. 
سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) + +### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا بالكامل؟ + + + +بالنسبة لمن يتمتعون بكامل الصلاحيات ، فإن العملية مماثلة: + +1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) + +2. قم بتعيين عنوان L2 الخاص بك من خلال استدعاء عقد أداة النقل + +3. أرسل حصتك / تفويضك إلى L2 من خلال وظائف أداة التحويل "locked" في L1 Staking contract. + +4. سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) + +### هل يمكنني نقل عقد الاستحقاق الخاص بي إلى Arbitrum؟ + +يمكنك تحويل رصيد GRT الخاص بعقد الاستحقاق الخاص بك إلى عقد استحقاق آخر على L2. هذا شرط أساسي لنقل الحصة أو التفويض من عقد الاستحقاق الخاص بك إلى L2. يجب أن يحتوي عقد الاستحقاق على مبلغ غير صفري من GRT (يمكنك تحويل مبلغ صغير إليه مثل 1 GRT إذا لزم الأمر). + +عندما تقوم بتحويل GRT من عقد الاستحقاق L1 الخاص بك إلى L2 ، يمكنك اختيار المبلغ الذي تريد إرساله ويمكنك القيام بذلك عدة مرات. سيتم بدء عقد الاستحقاق على L2 في المرة الأولى التي تقوم فيها بتحويل GRT. + +تتم عمليات النقل باستخدام أداة النقل(Transfer Tool) التي ستكون مرئية في ملف تعريف Explorer الخاص بك عند الاتصال بحساب عقد الاستحقاق. + +يرجى ملاحظة أنك لن تكون قادرًا على استخلاص/ سحب GRT من عقد الاستحقاق على L2 حتى نهاية الجدول الزمني للاستحقاق عندما يتم تخويل عقدك بالكامل. إذا كنت بحاجة لتحرير GRT قبل ذلك الحين ، فيمكنك إعادة نقل GRT إلى عقد الاستحقاق على L1 باستخدام أداة تحويل أخرى متاحة لهذا الغرض. + +إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. + +### أنا أستخدم عقد الاستحقاق الخاص بي للقيام بالتخزين (staking) في mainnet. هل يمكنني تحويل حصتي إلى Arbitrum؟ + +نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل الحصة بحيث تكون مملوكة لعقد الاستحقاق L2 الخاص بك. 
يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعض رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك تحويل حصتك إلى أي عنوان على L2 ، ولكن يجب عليك تعيينها مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. + +### أنا أستخدم عقد الاستحقاق الخاص بي للتفويض على mainnet. هل يمكنني نقل تفويضاتي إلى Arbitrum؟ + +نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل التفويض بحيث يكون مملوكًا لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعضا من رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك نقل تفويضك إلى أي عنوان في L2 ، ولكن يجب عليك تعيينه مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. + +### هل يمكنني تحديد مستفيد مختلف لعقد الاستحقاق الخاص بي على L2؟ + +نعم ، في المرة الأولى التي تقوم فيها بتحويل رصيد وإعداد عقد استحقاق L2 ، يمكنك تحديد مستفيد من L2. تأكد من أن هذا المستفيد عبارة عن محفظة يمكنها إجراء المعاملات على Arbitrum One ، يجب أن تكون EOA أو multisig تم نشرها على Arbitrum One. + +إذا كان عقدك مخولًا بالكامل ، فلن تقوم بإعداد عقد استحقاق على L2 ؛ بدلاً من ذلك ، ستقوم بتعيين عنوان محفظة L2 وستكون هذه هي المحفظة المستلمة لحصتك أو تفويضك في Arbitrum. + +### عقدي مخول بالكامل. هل يمكنني نقل حصتي أو تفويضي إلى عنوان آخر ليس عقداً استحقاقيا على L2؟ + +نعم. إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. + +هذا يسمح لك بتحويل حصتك أو تفويضك إلى أي عنوان L2. + +### عقد الاستحقاق الخاص بي لا يزال مستحقًا. كيف أقوم بتحويل رصيد عقد الاستحقاق الخاص بي إلى L2؟ + +تنطبق هذه الخطوات فقط إذا كان عقدك لا يزال مستحقًا ، أو إذا كنت قد استخدمت هذه العملية من قبل عندما كان عقدك لا يزال مستحقًا. 
+ +لتحويل عقد الاستحقاق الخاص بك إلى L2 ، سوف ترسل أي رصيد GRT إلى L2 باستخدام أدوات التحويل ، والتي ستعمل على تهيئة عقد استحقاق L2 الخاص بك: + +1. قم بإيداع بعض ETH في عقد أداة النقل (سيتم استخدام هذا لدفع ثمن غاز L2) + +2. إبطال وصول البروتوكول إلى عقد الاستحقاق (مطلوب للخطوة التالية) + +3. امنح البروتوكول حق الوصول إلى عقد الاستحقاق (سيسمح لعقدك بالتفاعل مع أداة التحويل) + +4. حدد عنوان المستفيد على L2 \ \* وابدأ في تحويل الرصيد على Ethereum mainnet + +5. انتظر 20 دقيقة للتأكيد + +6. قم بتأكيد تحويل الرصيد على L2 + +\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. + +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. 
This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### هل يمكنني إرجاع عقد الاستحقاق إلى L1؟ + +ليست هناك حاجة للقيام بذلك لأن عقد الاستحقاق الخاص بك لا يزال في L1. عندما تستخدم أدوات التحويل ، فأنت تقوم فقط بإنشاء عقد جديد في L2 مرتبط بعقد الاستحقاق L1 الخاص بك ، ويمكنك إرسال GRT ذهابًا وإيابًا بينهما. + +### لماذا أحتاج إلى تغيير عقد الاستحقاق الخاص بي من البداية؟ + +يجب عليك إعداد عقد استحقاق L2 حتى يتمكن هذا الحساب من امتلاك حصتك أو تفويضك في L2. وإلا ، لن يكون هناك وسيلة لك لنقل الحصة / التفويض إلى L2 دون "الهروب" من عقد الاستحقاق. + +### ماذا يحدث إذا حاولت سحب عقدي عندما لم يتم تنفيذه بالكامل؟هل هذا ممكن؟ + +هذا ليس احتمال. يمكنك إعادة الأموال إلى L1 وسحبها هناك. + +### ماذا لو لم أرغب في نقل عقد الاستحقاق الخاص بي إلى L2؟ + +يمكنك الاستمرار في التخزين / التفويض على L1. بمرور الوقت ، قد ترغب في التفكير في الانتقال إلى L2 لتمكين المكافآت هناك حيث يتوسع البروتوكول في Arbitrum. لاحظ أن أدوات التحويل هذه مخصصة لمنح العقود المسموح لها بالمشاركة والتفويض في البروتوكول. إذا كان عقدك لا يسمح بالتخزين أو التفويض ، أو كان قابلاً للإلغاء ، فلا توجد أداة نقل متاحة. ستظل قادرًا على سحب GRT من L1 عندما يكون ذلك متاحًا. 
diff --git a/website/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx new file mode 100644 index 000000000000..ed881d8de079 --- /dev/null +++ b/website/pages/ar/archived/arbitrum/l2-transfer-tools-guide.mdx @@ -0,0 +1,165 @@ +--- +title: L2 Transfer Tools Guide +--- + +يسهل الغراف الانتقال إلى الطبقة الثانية على أربترم. لكل مشارك في البروتوكول ، توجد مجموعة من أدوات نقل الطبقة الثانية لجعل النقل إليها سلسًا لجميع المشاركين في الشبكة. ستطلب منك هذه الأدوات اتباع مجموعة محددة من الخطوات بناءً على ما تقوم بنقله. + +بعض الأسئلة المتكررة حول هذه الأدوات تمت الإجابة عليها في [الأسئلة الشائعة حول أدوات نقل الطبقة الثانية](/archived/arbitrum/l2-transfer-tools-faq/). تحتوي الأسئلة الشائعة على تفسيرات متعمقة لكيفية استخدام الأدوات وكيفية عملها والأمور التي يجب وضعها في الاعتبار عند إستخدامها. + +## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) + + + +## فوائد نقل الغراف الفرعي الخاصة بك + +مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. وتعتبر آربترم سلسلة كتل من الطبقة الثانية أو "L2"، حيث ترث الأمان من سلسلة الإيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ كبير. + +عندما تقوم بنشر أو ترقية الغرافات الفرعية الخاصة بك إلى شبكة الغراف، فأنت تتفاعل مع عقودٍ ذكيةٍ في البروتوكول وهذا يتطلب دفع رسوم الغاز باستخدام عملة الايثيريوم. من خلال نقل غرافاتك الفرعية إلى آربترم، فإن أي ترقيات مستقبلية لغرافك الفرعي ستتطلب رسوم غازٍ أقل بكثير. الرسوم الأقل، وكذلك حقيقة أن منحنيات الترابط التنسيقي على الطبقة الثانية مستقيمة، تجعل من الأسهل على المنسِّقين الآخرين تنسيق غرافك الفرعي، ممّا يزيد من مكافآت المفهرِسين على غرافك الفرعي. هذه البيئة ذات التكلفة-الأقل كذلك تجعل من الأرخص على المفهرسين أن يقوموا بفهرسة وخدمة غرافك الفرعي. 
سوف تزداد مكافآت الفهرسة على آربترم وتتناقص على شبكة إيثيريوم الرئيسية على مدى الأشهر المقبلة، لذلك سيقوم المزيد والمزيد من المُفَهرِسين بنقل ودائعهم المربوطة وتثبيت عملياتهم على الطبقة الثانية. + +## فهم ما يحدث مع الإشارة وغرافك الفرعي على الطبقة الأولى وعناوين مواقع الإستعلام + +عند نقل سبجراف إلى Arbitrum، يتم استخدام جسر Arbitrum GRT، الذي بدوره يستخدم جسر Arbitrum الأصلي، لإرسال السبجراف إلى L2. سيؤدي عملية "النقل" إلى إهمال السبجراف على شبكة الإيثيريوم الرئيسية وإرسال المعلومات لإعادة إنشاء السبجراف على L2 باستخدام الجسر. ستتضمن أيضًا رصيد GRT المرهون المرتبط بمالك السبجراف، والذي يجب أن يكون أكبر من الصفر حتى يقبل الجسر النقل. + +عندما تختار نقل الرسم البياني الفرعي ، سيؤدي ذلك إلى تحويل جميع إشارات التنسيق الخاصة بالرسم الفرعي إلى GRT. هذا يعادل "إهمال" الرسم البياني الفرعي على الشبكة الرئيسية. سيتم إرسال GRT المستخدمة لعملية التنسيق الخاصة بك إلى L2 جنبًا إلى جنب مع الرسم البياني الفرعي ، حيث سيتم استخدامها لإنتاج الإشارة نيابة عنك. + +يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون جزء من GRT الخاص بهم ، أو نقله أيضًا إلى L2 لصك إشارة على نفس الرسم البياني الفرعي. إذا لم يقم مالك الرسم البياني الفرعي بنقل الرسم البياني الفرعي الخاص به إلى L2 وقام بإيقافه يدويًا عبر استدعاء العقد ، فسيتم إخطار المنسقين وسيتمكنون من سحب تنسيقهم. + +بمجرد نقل الرسم البياني الفرعي ، لن يتلقى المفهرسون بعد الآن مكافآت لفهرسة الرسم البياني الفرعي، نظرًا لأنه يتم تحويل كل التنسيق لـ GRT. ومع ذلك ، سيكون هناك مفهرسون 1) سيستمرون في خدمة الرسوم البيانية الفرعية المنقولة لمدة 24 ساعة ، و 2) سيبدأون فورًا في فهرسة الرسم البياني الفرعي على L2. ونظرًا لأن هؤلاء المفهرسون لديهم بالفعل رسم بياني فرعي مفهرس ، فلا داعي لانتظار مزامنة الرسم البياني الفرعي ، وسيكون من الممكن الاستعلام عن الرسم البياني الفرعي على L2 مباشرة تقريبًا. + +يجب إجراء الاستعلامات على الرسم البياني الفرعي في L2 على عنوان URL مختلف (على `arbitrum-gateway.thegraph.com`) ، لكن عنوان URL L1 سيستمر في العمل لمدة 48 ساعة على الأقل. 
بعد ذلك ، ستقوم بوابة L1 بإعادة توجيه الاستعلامات إلى بوابة L2 (لبعض الوقت) ، ولكن هذا سيضيف زمن تأخير لذلك يوصى تغيير جميع استعلاماتك إلى عنوان URL الجديد في أقرب وقت ممكن. + +## اختيار محفظة L2 الخاصة بك + +عندما قمت بنشر subgraph الخاص بك على الشبكة الرئيسية ، فقد استخدمت محفظة متصلة لإنشاء subgraph ، وتمتلك هذه المحفظة NFT الذي يمثل هذا subgraph ويسمح لك بنشر التحديثات. + +عند نقل الرسم البياني الفرعي إلى Arbitrum ، يمكنك اختيار محفظة مختلفة والتي ستمتلك هذا الـ subgraph NFT على L2. + +إذا كنت تستخدم محفظة "عادية" مثل MetaMask (حساب مملوك خارجيًا EOA ، محفظة ليست بعقد ذكي) ، فهذا اختياري ويوصى بالاحتفاظ بعنوان المالك نفسه كما في L1. + +إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كمالك للرسم البياني الفرعي الخاص بك على L2. + +** من المهم جدًا استخدام عنوان محفظة تتحكم فيه ، ويمكنه إجراء معاملات على Arbitrum. وإلا فسيتم فقد الرسم البياني الفرعي ولا يمكن استعادته. ** + +## التحضير لعملية النقل: إنشاء جسر لـبعض ETH + +يتضمن نقل الغراف الفرعي إرسال معاملة عبر الجسر ، ثم تنفيذ معاملة أخرى على شبكة أربترم. تستخدم المعاملة الأولى الإيثيريوم على الشبكة الرئيسية ، وتتضمن بعضًا من إيثيريوم لدفع ثمن الغاز عند استلام الرسالة على الطبقة الثانية. ومع ذلك ، إذا كان هذا الغاز غير كافٍ ، فسيتعين عليك إعادة إجراء المعاملة ودفع ثمن الغاز مباشرةً على الطبقة الثانية (هذه هي "الخطوة 3: تأكيد التحويل" أدناه). يجب تنفيذ هذه الخطوة ** في غضون 7 أيام من بدء التحويل **. علاوة على ذلك ، سيتم إجراء المعاملة الثانية مباشرة على شبكة أربترم ("الخطوة 4: إنهاء التحويل على الطبقة الثانية"). لهذه الأسباب ، ستحتاج بعضًا من إيثيريوم في محفظة أربترم. 
إذا كنت تستخدم متعدد التواقيع أو عقداً ذكياً ، فيجب أن يكون هناك بعضًا من إيثيريوم في المحفظة العادية (حساب مملوك خارجيا) التي تستخدمها لتنفيذ المعاملات ، وليس على محفظة متعددة التواقيع. + +يمكنك شراء إيثيريوم من بعض المنصات وسحبها مباشرة إلى أربترم، أو يمكنك استخدام جسر أربترم لإرسال إيثيريوم من محفظة الشبكة الرئيسية إلى الطبقة الثانية: [bridge.arbitrum.io](http://bridge.arbitrum.io). نظرًا لأن رسوم الغاز على أربترم أقل ، فستحتاج فقط إلى مبلغ صغير. من المستحسن أن تبدأ بمبلغ منخفض (على سبيل المثال ، 0.01 ETH) للموافقة على معاملتك. + +## العثور على أداة نقل الغراف الفرعي + +يمكنك العثور على أداة نقل L2 في صفحة الرسم البياني الفرعي الخاص بك على Subgraph Studio: + +![أداة النقل](/img/L2-transfer-tool1.png) + +إذا كنت متصلاً بالمحفظة التي تمتلك الغراف الفرعي، فيمكنك الوصول إليها عبر المستكشف، وذلك عن طريق الانتقال إلى صفحة الغراف الفرعي على المستكشف: + +![Transferring to L2](/img/transferToL2.png) + +سيؤدي النقر فوق زر النقل إلى الطبقة الثانية إلى فتح أداة النقل حيث يمكنك بدء عملية النقل. + +## الخطوة 1: بدء عملية النقل + +قبل بدء عملية النقل، يجب أن تقرر أي عنوان سيكون مالكًا للغراف الفرعي على الطبقة الثانية (انظر "اختيار محفظة الطبقة الثانية" أعلاه)، ويُوصَى بشدة بأن يكون لديك بعضًا من الإيثيريوم لرسوم الغاز على أربترم. يمكنك الاطلاع على "التحضير لعملية النقل: إنشاء جسر لـبعض ETH" أعلاه. + +يرجى أيضًا ملاحظة أن نقل الرسم البياني الفرعي يتطلب وجود كمية غير صفرية من إشارة التنسيق عليه بنفس الحساب الذي يمتلك الرسم البياني الفرعي ؛ إذا لم تكن قد أشرت إلى الرسم البياني الفرعي ، فسيتعين عليك إضافة القليل من إشارة التنسيق (يكفي إضافة مبلغ صغير مثل 1 GRT). + +بعد فتح أداة النقل، ستتمكن من إدخال عنوان المحفظة في الطبقة الثانية في حقل "عنوان محفظة الاستلام". تأكد من إدخال العنوان الصحيح هنا. بعد ذلك، انقر على "نقل الغراف الفرعي"، وسيتم طلب تنفيذ العملية في محفظتك. (يُرجى ملاحظة أنه يتم تضمين بعضًا من الإثيريوم لدفع رسوم الغاز في الطبقة الثانية). بعد تنفيذ العملية، سيتم بدء عملية النقل وإهمال الغراف الفرعي في الطبقة الأولى. 
(يمكنك الاطلاع على "فهم ما يحدث مع الإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام" أعلاه لمزيد من التفاصيل حول ما يحدث خلف الكواليس). + +إذا قمت بتنفيذ هذه الخطوة، **يجب عليك التأكد من أنك ستستكمل الخطوة 3 في غضون 7 أيام، وإلا فإنك ستفقد الغراف الفرعي والإشارة GRT الخاصة بك.** يرجع ذلك إلى آلية التواصل بين الطبقة الأولى والطبقة الثانية في أربترم: الرسائل التي ترسل عبر الجسر هي "تذاكر قابلة لإعادة المحاولة" يجب تنفيذها في غضون 7 أيام، وقد يتطلب التنفيذ الأولي إعادة المحاولة إذا كان هناك زيادة في سعر الغاز على أربترم. + +![Start the transfer to L2](/img/startTransferL2.png) + +## الخطوة 2: الانتظار حتى يتم نقل الغراف الفرعي إلى الطبقة الثانية + +بعد بدء عملية النقل، يتعين على الرسالة التي ترسل الـ subgraph من L1 إلى L2 أن يتم نشرها عبر جسر Arbitrum. يستغرق ذلك حوالي 20 دقيقة (ينتظر الجسر لكتلة الشبكة الرئيسية التي تحتوي على المعاملة حتى يتأكد أنها "آمنة" من إمكانية إعادة ترتيب السلسلة). + +بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. + +![شاشة انتظار](/img/screenshotOfWaitScreenL2.png) + +## الخطوة الثالثة: تأكيد التحويل + +في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز الطبقة الثانية المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى الغراف الفرعي في عقود أربترم. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على أربترم إلى فشل هذا التنفيذ التلقائي. وفي هذه الحالة ، ستكون "التذكرة" التي ترسل غرافك الفرعي إلى الطبقة الثانية معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. + +في هذه الحالة ، فستحتاج إلى الاتصال باستخدام محفظة الطبقة الثانية والتي تحتوي بعضاً من إيثيريوم على أربترم، قم بتغيير شبكة محفظتك إلى أربترم، والنقر فوق "تأكيد النقل" لإعادة محاولة المعاملة. + +![تأكيد النقل إلى الطبقة الثانية](/img/confirmTransferToL2.png) + +## الخطوة 4: إنهاء عملية النقل على L2 + +في هذه المرحلة، تم استلام الغراف الفرعي والـ GRT الخاص بك على أربترم، ولكن الغراف الفرعي لم يتم نشره بعد. 
ستحتاج إلى الربط باستخدام محفظة الطبقة الثانية التي اخترتها كمحفظة استلام، وتغيير شبكة محفظتك إلى أربترم، ثم النقر على "نشر الغراف الفرعي" + +![نشر الغراف الفرعي](/img/publishSubgraphL2TransferTools.png) + +![انتظر حتى يتم نشر الغراف الفرعي](/img/waitForSubgraphToPublishL2TransferTools.png) + +سيؤدي هذا إلى نشر الغراف الفرعي حتى يتمكن المفهرسون الذين يعملون في أربترم بالبدء في تقديم الخدمة. كما أنه سيعمل أيضًا على إصدار إشارة التنسيق باستخدام GRT التي تم نقلها من الطبقة الأولى. + +## Step 5: Updating the query URL + +تم نقل غرافك الفرعي بنجاح إلى أربترم! للاستعلام عن الغراف الفرعي ، سيكون عنوان URL الجديد هو: + +`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` + +لاحظ أن ID الغراف الفرعي على أربترم سيكون مختلفًا عن الذي لديك في الشبكة الرئيسية، ولكن يمكنك العثور عليه في المستكشف أو استوديو. كما هو مذكور أعلاه (راجع "فهم ما يحدث للإشارة والغراف الفرعي في الطبقة الأولى وعناوين الاستعلام") سيتم دعم عنوان URL الطبقة الأولى القديم لفترة قصيرة ، ولكن يجب عليك تبديل استعلاماتك إلى العنوان الجديد بمجرد مزامنة الغراف الفرعي على الطبقة الثانية. + +## كيفية نقل التنسيق الخاص بك إلى أربترم (الطبقة الثانية) + +## Understanding what happens to curation on subgraph transfers to L2 + +When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. + +This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). 
Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. + +A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. + +At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. + +## اختيار محفظة L2 الخاصة بك + +If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. + +If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. + +If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. + +**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** + +## Sending curation to L2: Step 1 + +Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. 
You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. + +If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. + +When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. + +![Transfer signal](/img/transferSignalL2TransferTools.png) + +After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. + +If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. + +## Sending curation to L2: step 2 + +Starting the transfer: + +![Send signal to L2](/img/sendingCurationToL2Step2First.png) + +After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 
+ +بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. + +![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) + +## Sending curation to L2: step 3 + +In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. + +في هذه الحالة ، فستحتاج إلى الاتصال باستخدام محفظة الطبقة الثانية والتي تحتوي بعضاً من إيثيريوم على أربترم، قم بتغيير شبكة محفظتك إلى أربترم، والنقر فوق "تأكيد النقل" لإعادة محاولة المعاملة. + +![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) + +## Withdrawing your curation on L1 + +If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. diff --git a/website/pages/ar/archived/sunrise.mdx b/website/pages/ar/archived/sunrise.mdx new file mode 100644 index 000000000000..f9419c36d642 --- /dev/null +++ b/website/pages/ar/archived/sunrise.mdx @@ -0,0 +1,79 @@ +--- +title: Post-Sunrise + Upgrading to The Graph Network FAQ +--- + +> Note: The Sunrise of Decentralized Data ended June 12th, 2024. + +## What was the Sunrise of Decentralized Data? + +The Sunrise of Decentralized Data was an initiative spearheaded by Edge & Node. This initiative enabled subgraph developers to upgrade to The Graph’s decentralized network seamlessly. + +This plan drew on previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs. + +### What happened to the hosted service?
+ +The hosted service query endpoints are no longer available, and developers cannot deploy new subgraphs on the hosted service. + +During the upgrade process, owners of hosted service subgraphs could upgrade their subgraphs to The Graph Network. Additionally, developers were able to claim auto-upgraded subgraphs. + +### Was Subgraph Studio impacted by this upgrade? + +No, Subgraph Studio was not impacted by Sunrise. Subgraphs were immediately available for querying, powered by the upgrade Indexer, which uses the same infrastructure as the hosted service. + +### Why were subgraphs published to Arbitrum, did it start indexing a different network? + +The Graph Network was initially deployed on Ethereum mainnet but was later moved to Arbitrum One in order to lower gas costs for all users. As a result, all new subgraphs are published to The Graph Network on Arbitrum so that Indexers can support them. Arbitrum is the network that subgraphs are published to, but subgraphs can index any of the [supported networks](/supported-networks/) + +## About the Upgrade Indexer + +> The upgrade Indexer is currently active. + +The upgrade Indexer was implemented to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and support new versions of existing subgraphs that had not yet been indexed. + +### What does the upgrade Indexer do? + +- It bootstraps chains that have yet to receive indexing rewards on The Graph Network and ensures that an Indexer is available to serve queries as quickly as possible after a subgraph is published. +- It supports chains that were previously only available on the hosted service. Find a comprehensive list of supported chains [here](/supported-networks/). +- Indexers that operate an upgrade Indexer do so as a public service to support new subgraphs and additional chains that lack indexing rewards before The Graph Council approves them. + +### Why is Edge & Node running the upgrade Indexer? 
+ +Edge & Node historically maintained the hosted service and, as a result, already have synced data for hosted service subgraphs. + +### What does the upgrade indexer mean for existing Indexers? + +Chains previously only supported on the hosted service were made available to developers on The Graph Network without indexing rewards at first. + +However, this action unlocked query fees for any interested Indexer and increased the number of subgraphs published on The Graph Network. As a result, Indexers have more opportunities to index and serve these subgraphs in exchange for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about the potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As it allowed more subgraphs to be upgraded from the hosted service to The Graph Network, Delegators benefit from the increased network activity. + +### Did the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer only allocates the minimum amount per subgraph and does not collect indexing rewards. + +It operates on an “as needed” basis, serving as a fallback until sufficient service quality is achieved by at least three other Indexers in the network for respective chains and subgraphs. + +### How does this affect subgraph developers? + +Subgraph developers can query their subgraphs on The Graph Network almost immediately after upgrading from the hosted service or [publishing from Subgraph Studio](/subgraphs/developing/publishing/publishing-a-subgraph/), as no lead time was required for indexing. Please note that [creating a subgraph](/developing/creating-a-subgraph/) was not impacted by this upgrade. + +### How does the upgrade Indexer benefit data consumers? 
+ +The upgrade Indexer enables chains on the network that were previously only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How does the upgrade Indexer price queries? + +The upgrade Indexer prices queries at the market rate to avoid influencing the query fee market. + +### When will the upgrade Indexer stop supporting a subgraph? + +The upgrade Indexer supports a subgraph until at least 3 other Indexers successfully and consistently serve queries made to it. + +Furthermore, the upgrade Indexer stops supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume. The query volume to the upgrade Indexer should trend towards zero, as it has a small allocation size, and other Indexers should be chosen for queries ahead of it. diff --git a/website/pages/ar/billing.mdx b/website/pages/ar/billing.mdx deleted file mode 100644 index 42aa104673bb..000000000000 --- a/website/pages/ar/billing.mdx +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: الفوترة ---- - -## Subgraph Billing Plans - -There are two plans to use when querying subgraphs on The Graph Network. - -- **Free Plan**: The Free Plan includes 100,000 free monthly queries with full access to the Subgraph Studio testing environment. This plan is designed for hobbyists, hackathoners, and those with side projects to try out The Graph before scaling their dapp. - -- **Growth Plan**: The Growth Plan includes everything in the Free Plan with all queries after 100,000 monthly queries requiring payments with GRT or credit card. The Growth Plan is flexible enough to cover teams that have established dapps across a variety of use cases. - - - -## Query Payments with credit card - -- To set up billing with credit/debit cards, users should access Subgraph Studio (https://thegraph.com/studio/) - 1. 
Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). - 2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. ستتم إعادة توجيهك إلى صفحة اختيار المحفظة. حدد محفظتك وانقر على "توصيل". - 3. Choose “Upgrade plan” if you are upgrading from the Free Plan or choose “Manage Plan” if you have already added GRT to your billing balance in the past. Next, you can estimate the number of queries to get a pricing estimate, but this is not a required step. - 4. To choose a credit card payment, choose “Credit card” as the payment method and fill out your credit card information. Those who have used Stripe before can use the Link feature to autofill their details. -- Invoices will be processed at the end of each month and require an active credit card on file for all queries beyond the free plan quota. - -## Query Payments with GRT - -Subgraph users can use The Graph Token (or GRT) to pay for queries on The Graph Network. With GRT, invoices will be processed at the end of each month and require a sufficient balance of GRT to make queries beyond the Free Plan quota of 100,000 monthly queries. You'll be required to pay fees generated from your API keys. Using the billing contract, you'll be able to: - -- إضافة وسحب GRT من رصيد حسابك. -- تتبع أرصدتك بناءً على مقدار GRT الذي أضفته إلى رصيد حسابك ، والمبلغ الذي قمت بإزالته ، وفواتيرك. -- دفع الفواتير تلقائيًا بناءً على رسوم الاستعلام التي تم إنشاؤها ، طالما أن هناك ما يكفي من GRT في رصيد حسابك. - -### GRT on Arbitrum or Ethereum - -The Graph’s billing system accepts GRT on Arbitrum, and users will need ETH on Arbitrum to pay their gas. While The Graph protocol started on Ethereum Mainnet, all activity, including the billing contracts, is now on Arbitrum One. - -To pay for queries, you need GRT on Arbitrum. Here are a few different ways to achieve this: - -- If you already have GRT on Ethereum, you can bridge it to Arbitrum. 
You can do this via the GRT bridging option provided in Subgraph Studio or by using one of the following bridges: - -- [The Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) -- [الانتقال](https://transferto.xyz/swap) - -- If you already have assets on Arbitrum, you can swap them for GRT via a swapping protocol like Uniswap. - -- Alternatively, you acquire GRT directly on Arbitrum through a decentralized exchange. - -> This section is written assuming you already have GRT in your wallet, and you're on Arbitrum. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). - -Once you bridge GRT, you can add it to your billing balance. - -### Adding GRT using a wallet - -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). -2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. ستتم إعادة توجيهك إلى صفحة اختيار المحفظة. حدد محفظتك وانقر على "توصيل". -3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet". -4. Use the slider to estimate the number of queries you expect to make on a monthly basis. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. -5. Choose "Cryptocurrency". GRT is currently the only cryptocurrency accepted on The Graph Network. -6. Select the number of months you would like to prepay. - - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time. -7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable. -8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from you wallet. - - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas. -9. 
Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs. - -- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance. - -### Withdrawing GRT using a wallet - -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". -3. Click the "Manage" button at the top right corner of the page. Select "Withdraw GRT". A side panel will appear. -4. Enter the amount of GRT you would like to withdraw. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. -6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. - -### إضافة GRT باستخدام محفظة متعددة التوقيع (multisig wallet) - -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. -3. Select the "Manage" button near the top right corner. First time users will see an option to "Upgrade to Growth plan" while returning users will click "Deposit from wallet". -4. Use the slider to estimate the number of queries you expect to make on a monthly basis. - - For suggestions on the number of queries you may use, see our **Frequently Asked Questions** page. -5. Choose "Cryptocurrency". 
GRT is currently the only cryptocurrency accepted on The Graph Network. -6. Select the number of months you would like to prepay. - - Paying in advance does not commit you to future usage. You will only be charged for what you use and you can withdraw your balance at any time. -7. Pick the network from which you are depositing your GRT. GRT on Arbitrum or Ethereum are both acceptable. 8. Click "Allow GRT Access" and then specify the amount of GRT that can be taken from you wallet. - - If you are prepaying for multiple months, you must allow access to the amount that corresponds with that amount. This interaction will not cost any gas. -8. Lastly, click on "Add GRT to Billing Balance". This transaction will require ETH on Arbitrum to cover the gas costs. - -- Note that GRT deposited from Arbitrum will process within a few moments while GRT deposited from Ethereum will take approximately 15-20 minutes to process. Once the transaction is confirmed, you'll see the GRT added to your account balance. - -## Getting GRT - -This section will show you how to get GRT to pay for query fees. - -### Coinbase - -This will be a step by step guide for purchasing GRT on Coinbase. - -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select GRT. -5. Select the payment method. Select your preferred payment method. -6. Select the amount of GRT you want to purchase. -7. Review your purchase. Review your purchase and click "Buy GRT". -8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. -9. 
You can transfer the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the GRT to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the GRT account. - - Enter the amount of GRT you want to send and the wallet address you want to send it to. - - Click "Continue" and confirm your transaction. -Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a wallet. - -You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). - -### Binance - -This will be a step by step guide for purchasing GRT on Binance. - -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. -4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. -5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. -6. Select the amount of GRT you want to purchase. -7. Review your purchase and click "Buy GRT". -8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. -9. You can withdraw the GRT from your account to your wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your wallet, add your wallet's address to the withdrawal whitelist. 
- - Click on the "wallet" button, click withdraw, and select GRT. - - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. - - Click "Continue" and confirm your transaction. - -You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). - -### Uniswap - -This is how you can purchase GRT on Uniswap. - -1. Go to [Uniswap](https://app.uniswap.org/swap?chain=arbitrum) and connect your wallet. -2. Select the token you want to swap from. Select ETH. -3. Select the token you want to swap to. Select GRT. - - Make sure you're swapping for the correct token. The GRT smart contract address on Arbitrum One is: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -4. Enter the amount of ETH you want to swap. -5. Click "Swap". -6. Confirm the transaction in your wallet and you wait for the transaction to process. - -You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). - -## Getting Ether - -This section will show you how to get Ether (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. - -### Coinbase - -This will be a step by step guide for purchasing ETH on Coinbase. - -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. 
Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will have successfully purchased ETH. -9. You can transfer the ETH from your Coinbase account to your wallet such as [MetaMask](https://metamask.io/). - - To transfer the ETH to your wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the ETH account. - - Enter the amount of ETH you want to send and the wallet address you want to send it to. - - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). - -### Binance - -This will be a step by step guide for purchasing ETH on Binance. - -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. -4. Select the currency you want to purchase. Select ETH. -5. Select your preferred payment method. -6. Enter the amount of ETH you want to purchase. -7. Review your purchase and click "Buy ETH". -8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. -9. You can withdraw the ETH from your account to your wallet such as [MetaMask](https://metamask.io/). - - To withdraw the ETH to your wallet, add your wallet's address to the withdrawal whitelist. - - Click on the "wallet" button, click withdraw, and select ETH. - - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. 
- - Ensure that you are sending to your Ethereum wallet address on Arbitrum One. - - Click "Continue" and confirm your transaction. - -You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). - -## Billing FAQs - -### How many queries will I need? - -You don't need to know how many queries you'll need in advance. You will only be charged for what you use and you can withdraw GRT from your account at any time. - -We recommend you overestimate the number of queries you will need so that you don’t have to top up your balance frequently. A good estimate for small to medium sized applications is to start with 1M-2M queries per month and monitor usage closely in the first weeks. For larger apps, a good estimate is to use the number of daily visits your site gets multiplied by the number of queries your most active page makes upon opening. - -Of course, both new and existing users can reach out to Edge & Node's BD team for a consult to learn more about anticipated usage. - -### Can I withdraw GRT from my billing balance? - -Yes, you can always withdraw GRT that has not already been used for queries from your billing balance. The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). - -### What happens when my billing balance runs out? Will I get a warning? - -You will receive several email notifications before your billing balance runs out. 
diff --git a/website/pages/ar/chain-integration-overview.mdx b/website/pages/ar/chain-integration-overview.mdx deleted file mode 100644 index b8e41513fa9d..000000000000 --- a/website/pages/ar/chain-integration-overview.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: نظرة عامة حول عملية التكامل مع الشبكة ---- - -تم تصميم عملية تكامل قائمة على الحوكمة وبشفافية لفرق سلاسل الكتل التي تسعى للإندماج مع بروتوكول الغراف (https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). إنها عملية مكونة من 3 مراحل، كما هو ملخص أدناه. - -## المرحلة الأولى: التكامل التقني - -- Please visit [New Chain Integration](/new-chain-integration) for information on `graph-node` support for new chains. -- تستهل الفرق عملية التكامل مع البروتوكول من خلال إنشاء موضوع في المنتدى هنا(https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (الفئة الفرعية "مصادر البيانات الجديدة" تحت قسم "الحوكمة واقتراحات تحسين الغراف"). استخدام قالب المنتدى الافتراضي إلزامي. - -## المرحلة الثانية: التحقق من صحة التكامل - -- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. -- مفهرسو الغراف يختبرون التكامل على شبكة إختبار الغراف. -- يقوم المطورون الأساسيون والمفهرسون بمراقبة استقرار، وأداء، وحتمية البيانات. - -## المرحلة الثالثة: التكامل مع الشبكة الرئيسية - -- يتم اقتراح التكامل مع الشبكة الرئيسية من قبل الفرق عن طريق تقديم اقتراح تحسين الغراف (GIP) واستهلال طلب سحب (PR) على مصفوفة دعم الميزات (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)(لمزيد من التفاصيل، يرجى زيارة الرابط). 
-- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. - ---- - -إذا بدت العملية مربكة، فلا تقلق! تلتزم مؤسسة الغراف بدعم المتكاملين من خلال تعزيز التعاون وتوفير المعلومات الجوهرية وتوجيههم خلال مراحل مختلفة، بما في ذلك توجيههم خلال عمليات الحوكمة مثل اقتراحات تحسين الغراف وطلبات السحب. إذا كان لديك أسئلة، فيرجى التواصل مع [info@thegraph.foundation](mailto:info@thegraph.foundation) أو من خلال ديسكورد (باستطاعتك التواصل مع بيدرو، عضو مؤسسة الغراف، أو IndexerDAO أو المطورين الأساسيين الآخرين). - -Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! - ---- - -## الأسئلة الشائعة - -### 1. كيف يتعلق هذا بـ مقترح تحسين الغراف "خدمات عالم البيانات" (https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)؟ - -هذه العملية مرتبطة بخدمة بيانات الغراف الفرعي، وهي مطبقة فقط على مصادر بيانات الغراف الفرعي الجديد. - -### 2. ماذا يحدث إذا تم دعم فايرهوز و سبستريمز بعد أن تم دعم الشبكة على الشبكة الرئيسية؟ - -هذا سيؤثر فقط على دعم البروتوكول لمكافآت الفهرسة على الغرافات الفرعية المدعومة من سبستريمز. تنفيذ الفايرهوز الجديد سيحتاج إلى الفحص على شبكة الاختبار، وفقًا للمنهجية الموضحة للمرحلة الثانية في هذا المقترح لتحسين الغراف. وعلى نحو مماثل، وعلى افتراض أن التنفيذ فعال وموثوق به، سيتتطالب إنشاء طلب سحب على [مصفوفة دعم الميزات] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) ("مصادر بيانات سبستريمز" ميزة للغراف الفرعي)، بالإضافة إلى مقترح جديد لتحسين الغراف، لدعم البروتوكول لمكافآت الفهرسة. يمكن لأي شخص إنشاء طلب السحب ومقترح تحسين الغراف؛ وسوف تساعد المؤسسة في الحصول على موافقة المجلس. - -### 3. How much time will the process of reaching full protocol support take? 
- -يُتوقع أن يستغرق الوصول إلى الشبكة الرئيسية عدة أسابيع، وذلك يعتمد على وقت تطوير التكامل، وما إذا كانت هناك حاجة إلى بحوث إضافية، واختبارات وإصلاحات الأخطاء، وكذلك توقيت عملية الحوكمة التي تتطلب ملاحظات المجتمع كما هو الحال دائمًا. - -يعتمد دعم البروتوكول لمكافآت الفهرسة على قدرة أصحاب الحصص في المضي قدماً في عمليات الفحص وجمع الملاحظات ومعالجة المساهمات في قاعدة الكود الأساسية، إذا كان ذلك قابلاً للتطبيق. هذا مرتبط مباشرة بنضج عملية التكامل ومدى استجابة فريق التكامل (والذي قد يكون أو قد لا يكون نفس الفريق المسؤول عن تنفيذ إجراء الإستدعاء عن بعد\الفايرهوز). المؤسسة هنا لمساعدة الدعم خلال العملية بأكملها. - -### 4. كيف سيتم التعامل مع الأولويات؟ - -Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. diff --git a/website/pages/ar/contracts.mdx b/website/pages/ar/contracts.mdx new file mode 100644 index 000000000000..d2255f3af336 --- /dev/null +++ b/website/pages/ar/contracts.mdx @@ -0,0 +1,29 @@ +--- +title: Protocol Contracts +--- + +import { ProtocolContractsTable } from '@/src/contracts' + +Below are the deployed contracts which power The Graph Network. Visit the official [contracts repository](https://github.com/graphprotocol/contracts) to learn more. + +## Arbitrum + +This is the principal deployment of The Graph Network. + + + +## Mainnet + +This was the original deployment of The Graph Network. [Learn more](/archived/arbitrum/arbitrum-faq/) about The Graph's scaling with Arbitrum. + + + +## Arbitrum Sepolia + +This is the primary testnet for The Graph Network. Testnet is predominantly used by core developers and ecosystem participants for testing purposes. There are no guarantees of service or availability on The Graph's testnets. 
+ + + +## Sepolia + + diff --git a/website/pages/ar/cookbook/_meta.js b/website/pages/ar/cookbook/_meta.js deleted file mode 100644 index 7fc5602ab4d2..000000000000 --- a/website/pages/ar/cookbook/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/cookbook/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/cookbook/arweave.mdx b/website/pages/ar/cookbook/arweave.mdx deleted file mode 100644 index 06fe4729bf4b..000000000000 --- a/website/pages/ar/cookbook/arweave.mdx +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: Building Subgraphs on Arweave ---- - -> Arweave support in Graph Node and on Subgraph Studio is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! - -In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. - -## What is Arweave? - -The Arweave protocol allows developers to store data permanently and that is the main difference between Arweave and IPFS, where IPFS lacks the feature; permanence, and files stored on Arweave can't be changed or deleted. - -Arweave already has built numerous libraries for integrating the protocol in a number of different programming languages. For more information you can check: - -- [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave Resources](https://www.arweave.org/build) - -## What are Arweave Subgraphs? - -The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). - -[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. 
- -## Building an Arweave Subgraph - -To be able to build and deploy Arweave Subgraphs, you need two packages: - -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. - -## Subgraph's components - -There are three components of a subgraph: - -### 1. Manifest - `subgraph.yaml` - -Defines the data sources of interest, and how they should be processed. Arweave is a new kind of data source. - -### 2. Schema - `schema.graphql` - -Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. - -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). - -### 3. AssemblyScript Mappings - `mapping.ts` - -This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. - -During subgraph development there are two key commands: - -``` -$ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder -``` - -## تعريف بيان الرسم البياني الفرعي - -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. 
See below for an example subgraph manifest for an Arweave subgraph: - -```yaml -specVersion: 0.0.5 -description: Arweave Blocks Indexing -schema: - file: ./schema.graphql # link to the schema file -dataSources: - - kind: arweave - name: arweave-blocks - network: arweave-mainnet # The Graph only supports Arweave Mainnet - source: - owner: 'ID-OF-AN-OWNER' # The public key of an Arweave wallet - startBlock: 0 # set this to 0 to start indexing from chain genesis - mapping: - apiVersion: 0.0.5 - language: wasm/assemblyscript - file: ./src/blocks.ts # link to the file with the Assemblyscript mappings - entities: - - Block - - Transaction - blockHandlers: - - handler: handleBlock # the function name in the mapping file - transactionHandlers: - - handler: handleTx # the function name in the mapping file -``` - -- Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. In Subgraph Studio, Arweave's mainnet is `arweave-mainnet` -- Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet - -Arweave data sources support two types of handlers: - -- `blockHandlers` - Run on every new Arweave block. No source.owner is required. -- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` - -> The source.owner can be the owner's address, or their Public Key. - -> Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. - -> Note: [Irys (previously Bundlr)](https://irys.xyz/) transactions are not supported yet. - -## تعريف المخطط - -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. 
There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). - -## أسيمبلي سكريبت التعيينات - -تمت كتابة المعالجات الخاصة بمعالجة الأحداث بـ[ أسيمبلي سكريبت ](https://www.assemblyscript.org/). - -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/graph-ts/api/). - -```tsx -class Block { - timestamp: u64 - lastRetarget: u64 - height: u64 - indepHash: Bytes - nonce: Bytes - previousBlock: Bytes - diff: Bytes - hash: Bytes - txRoot: Bytes - txs: Bytes[] - walletList: Bytes - rewardAddr: Bytes - tags: Tag[] - rewardPool: Bytes - weaveSize: Bytes - blockSize: Bytes - cumulativeDiff: Bytes - hashListMerkle: Bytes - poa: ProofOfAccess -} - -class Transaction { - format: u32 - id: Bytes - lastTx: Bytes - owner: Bytes - tags: Tag[] - target: Bytes - quantity: Bytes - data: Bytes - dataSize: Bytes - dataRoot: Bytes - signature: Bytes - reward: Bytes -} -``` - -Block handlers receive a `Block`, while transactions receive a `Transaction`. - -Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). - -## Deploying an Arweave Subgraph in Subgraph Studio - -Once your subgraph has been created on your Subgraph Studio dashboard, you can deploy by using the `graph deploy` CLI command. - -```bash -graph deploy --access-token -``` - -## Querying an Arweave Subgraph - -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. - -## أمثلة على الـ Subgraphs - -Here is an example subgraph for reference: - -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) - -## الأسئلة الشائعة - -### Can a subgraph index Arweave and other chains? 
- -No, a subgraph can only support data sources from one chain/network. - -### Can I index the stored files on Arweave? - -Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). - -### Can I identify Bundlr bundles in my subgraph? - -This is not currently supported. - -### How can I filter transactions to a specific account? - -The source.owner can be the user's public key or account address. - -### What is the current encryption format? - -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). - -The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: - -``` -const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" -]; - -const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" -]; - -function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? 
base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; -} -``` diff --git a/website/pages/ar/cookbook/avoid-eth-calls.mdx b/website/pages/ar/cookbook/avoid-eth-calls.mdx deleted file mode 100644 index 8897ecdbfdc7..000000000000 --- a/website/pages/ar/cookbook/avoid-eth-calls.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Subgraph Best Practice 4 - Improve Indexing Speed by Avoiding eth_calls ---- - -## TLDR - -`eth_calls` are calls that can be made from a subgraph to an Ethereum node. These calls take a significant amount of time to return data, slowing down indexing. If possible, design smart contracts to emit all the data you need so you don’t need to use `eth_calls`. - -## Why Avoiding `eth_calls` Is a Best Practice - -Subgraphs are optimized to index event data emitted from smart contracts. A subgraph can also index the data coming from an `eth_call`, however, this can significantly slow down subgraph indexing as `eth_calls` require making external calls to smart contracts. The responsiveness of these calls relies not on the subgraph but on the connectivity and responsiveness of the Ethereum node being queried. By minimizing or eliminating eth_calls in our subgraphs, we can significantly improve our indexing speed. 
- -### What Does an eth_call Look Like? - -`eth_calls` are often necessary when the data required for a subgraph is not available through emitted events. For example, consider a scenario where a subgraph needs to identify whether ERC20 tokens are part of a specific pool, but the contract only emits a basic `Transfer` event and does not emit an event that contains the data that we need: - -```yaml -event Transfer(address indexed from, address indexed to, uint256 value); -``` - -Suppose the tokens' pool membership is determined by a state variable named `getPoolInfo`. In this case, we would need to use an `eth_call` to query this data: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, Transfer } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransfer(event: Transfer): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - // Bind the ERC20 contract instance to the given address: - let instance = ERC20.bind(event.address) - - // Retrieve pool information via eth_call - let poolInfo = instance.getPoolInfo(event.params.to) - - transaction.pool = poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is functional, however is not ideal as it slows down our subgraph’s indexing. - -## How to Eliminate `eth_calls` - -Ideally, the smart contract should be updated to emit all necessary data within events. 
For instance, modifying the smart contract to include pool information in the event could eliminate the need for `eth_calls`: - -``` -event TransferWithPool(address indexed from, address indexed to, uint256 value, bytes32 indexed poolInfo); -``` - -With this update, the subgraph can directly index the required data without external calls: - -```typescript -import { Address } from '@graphprotocol/graph-ts' -import { ERC20, TransferWithPool } from '../generated/ERC20/ERC20' -import { TokenTransaction } from '../generated/schema' - -export function handleTransferWithPool(event: TransferWithPool): void { - let transaction = new TokenTransaction(event.transaction.hash.toHex()) - - transaction.pool = event.params.poolInfo.toHexString() - transaction.from = event.params.from.toHexString() - transaction.to = event.params.to.toHexString() - transaction.value = event.params.value - - transaction.save() -} -``` - -This is much more performant as it has eliminated the need for `eth_calls`. - -## How to Optimize `eth_calls` - -If modifying the smart contract is not possible and `eth_calls` are required, read “[Improve Subgraph Indexing Performance Easily: Reduce eth_calls](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)” by Simon Emanuel Schmid to learn various strategies on how to optimize `eth_calls`. - -## Reducing the Runtime Overhead of `eth_calls` - -For the `eth_calls` that can not be eliminated, the runtime overhead they introduce can be minimized by declaring them in the manifest. When `graph-node` processes a block it performs all declared `eth_calls` in parallel before handlers are run. Calls that are not declared are executed sequentially when handlers run. The runtime improvement comes from performing calls in parallel rather than sequentially - that helps reduce the total time spent in calls but does not eliminate it completely. - -Currently, `eth_calls` can only be declared for event handlers. 
In the manifest, write - -```yaml -event: TransferWithPool(address indexed, address indexed, uint256, bytes32 indexed) -handler: handleTransferWithPool -calls: - ERC20.poolInfo: ERC20[event.address].getPoolInfo(event.params.to) -``` - -The portion highlighted in yellow is the call declaration. The part before the colon is simply a text label that is only used for error messages. The part after the colon has the form `Contract[address].function(params)`. Permissible values for address and params are `event.address` and `event.params.`. - -The handler itself accesses the result of this `eth_call` exactly as in the previous section by binding to the contract and making the call. graph-node caches the results of declared `eth_calls` in memory and the call from the handler will retrieve the result from this in memory cache instead of making an actual RPC call. - -Note: Declared eth_calls can only be made in subgraphs with specVersion >= 1.2.0. - -## Conclusion - -You can significantly improve indexing performance by minimizing or eliminating `eth_calls` in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/cosmos.mdx b/website/pages/ar/cookbook/cosmos.mdx deleted file mode 100644 index 15fbf0537bca..000000000000 --- a/website/pages/ar/cookbook/cosmos.mdx +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: Building Subgraphs on Cosmos ---- - -This guide is an introduction on building subgraphs indexing [Cosmos](https://cosmos.network/) based blockchains. - -## What are Cosmos subgraphs? - -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. - -There are four types of handlers supported in Cosmos subgraphs: - -- **Block handlers** run whenever a new block is appended to the chain. -- **Event handlers** run when a specific event is emitted. -- **Transaction handlers** run when a transaction occurs. -- **Message handlers** run when a specific message occurs. - -Based on the [official Cosmos documentation](https://docs.cosmos.network/): - -> [Events](https://docs.cosmos.network/main/learn/advanced/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. - -> [Transactions](https://docs.cosmos.network/main/learn/advanced/transactions) are objects created by end-users to trigger state changes in the application. - -> [Messages](https://docs.cosmos.network/main/learn/advanced/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. 
- -Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. - -## Building a Cosmos subgraph - -### Subgraph Dependencies - -[graph-cli](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. - -[graph-ts](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. - -### Subgraph Main Components - -There are three key parts when it comes to defining a subgraph: - -**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. - -**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. - -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. - -### تعريف Subgraph Manifest - -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: - -```yaml -specVersion: 0.0.5 -description: Cosmos Subgraph Example -schema: - file: ./schema.graphql # link to the schema file -dataSources: - - kind: cosmos - name: CosmosHub - network: cosmoshub-4 # This will change for each cosmos-based blockchain. In this case, the example uses the Cosmos Hub mainnet. 
- source: - startBlock: 0 # Required for Cosmos, set this to 0 to start indexing from chain genesis - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - blockHandlers: - - handler: handleNewBlock # the function name in the mapping file - eventHandlers: - - event: rewards # the type of the event that will be handled - handler: handleReward # the function name in the mapping file - transactionHandlers: - - handler: handleTransaction # the function name in the mapping file - messageHandlers: - - message: /cosmos.staking.v1beta1.MsgDelegate # the type of a message - handler: handleMsgDelegate # the function name in the mapping file - file: ./src/mapping.ts # link to the file with the Assemblyscript mappings -``` - -- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). -- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. - -### تعريف المخطط - -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). - -### AssemblyScript Mappings - -تمت كتابة المعالجات(handlers) الخاصة بمعالجة الأحداث بـ[ AssemblyScript ](https://www.assemblyscript.org/). - -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/graph-ts/api/). 
- -```tsx -class Block { - header: Header - evidence: EvidenceList - resultBeginBlock: ResponseBeginBlock - resultEndBlock: ResponseEndBlock - transactions: Array - validatorUpdates: Array -} - -class EventData { - event: Event - block: HeaderOnlyBlock - tx: TransactionContext -} - -class TransactionData { - tx: TxResult - block: HeaderOnlyBlock -} - -class MessageData { - message: Any - block: HeaderOnlyBlock - tx: TransactionContext -} - -class TransactionContext { - hash: Bytes - index: u32 - code: u32 - gasWanted: i64 - gasUsed: i64 -} - -class HeaderOnlyBlock { - header: Header -} - -class Header { - version: Consensus - chainId: string - height: u64 - time: Timestamp - lastBlockId: BlockID - lastCommitHash: Bytes - dataHash: Bytes - validatorsHash: Bytes - nextValidatorsHash: Bytes - consensusHash: Bytes - appHash: Bytes - lastResultsHash: Bytes - evidenceHash: Bytes - proposerAddress: Bytes - hash: Bytes -} - -class TxResult { - height: u64 - index: u32 - tx: Tx - result: ResponseDeliverTx - hash: Bytes -} - -class Event { - eventType: string - attributes: Array -} - -class Any { - typeUrl: string - value: Bytes -} -``` - -Each handler type comes with its own data structure that is passed as an argument to a mapping function. - -- Block handlers receive the `Block` type. -- Event handlers receive the `EventData` type. -- Transaction handlers receive the `TransactionData` type. -- Message handlers receive the `MessageData` type. - -As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). 
- -You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). - -### Message decoding - -It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://protobuf.dev/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. - -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). - -## Creating and building a Cosmos subgraph - -The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: - -```bash -$ graph codegen -``` - -Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: - -```bash -$ graph build -``` - -## Deploying a Cosmos subgraph - -بمجرد إنشاء الـ subgraph الخاص بك ، يمكنك نشره باستخدام الأمر `graph deploy`: - -**Subgraph Studio** - -Visit the Subgraph Studio to create a new subgraph. 
- -```bash -graph deploy subgraph-name -``` - -**Local Graph Node (based on default configuration):** - -```bash -graph create subgraph-name --node http://localhost:8020 -``` - -```bash -graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 -``` - -## Querying a Cosmos subgraph - -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. - -## Supported Cosmos Blockchains - -### Cosmos Hub - -#### What is Cosmos Hub? - -The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. - -#### الشبكات - -Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. - -### Osmosis - -> Osmosis support in Graph Node and on Subgraph Studio is in beta: please contact the graph team with any questions about building Osmosis subgraphs! - -#### What is Osmosis? - -[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. - -#### الشبكات - -Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. - -## أمثلة على الـ Subgraphs - -Here are some example subgraphs for reference: - -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) - -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) - -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) - -[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/ar/cookbook/derivedfrom.mdx b/website/pages/ar/cookbook/derivedfrom.mdx deleted file mode 100644 index 09ba62abde3f..000000000000 --- a/website/pages/ar/cookbook/derivedfrom.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Subgraph Best Practice 2 - Improve Indexing and Query Responsiveness By Using @derivedFrom ---- - -## TLDR - -Arrays in your schema can really slow down a subgraph's performance as they grow beyond thousands of entries. If possible, the `@derivedFrom` directive should be used when using arrays as it prevents large arrays from forming, simplifies handlers, and reduces the size of individual entities, improving indexing speed and query performance significantly. 
- -## How to Use the `@derivedFrom` Directive - -You just need to add a `@derivedFrom` directive after your array in your schema. Like this: - -```graphql -comments: [Comment!]! @derivedFrom(field: "post") -``` - -`@derivedFrom` creates efficient one-to-many relationships, enabling an entity to dynamically associate with multiple related entities based on a field in the related entity. This approach removes the need for both sides of the relationship to store duplicate data, making the subgraph more efficient. - -### Example Use Case for `@derivedFrom` - -An example of a dynamically growing array is a blogging platform where a “Post” can have many “Comments”. - -Let’s start with our two entities, `Post` and `Comment` - -Without optimization, you could implement it like this with an array: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! -} - -type Comment @entity { - id: Bytes! - content: String! -} -``` - -Arrays like these will effectively store extra Comments data on the Post side of the relationship. - -Here’s what an optimized version looks like using `@derivedFrom`: - -```graphql -type Post @entity { - id: Bytes! - title: String! - content: String! - comments: [Comment!]! @derivedFrom(field: "post") -} - -type Comment @entity { - id: Bytes! - content: String! - post: Post! -} -``` - -Just by adding the `@derivedFrom` directive, this schema will only store the “Comments” on the “Comments” side of the relationship and not on the “Post” side of the relationship. Arrays are stored across individual rows, which allows them to expand significantly. This can lead to particularly large sizes if their growth is unbounded. - -This will not only make our subgraph more efficient, but it will also unlock three features: - -1. We can query the `Post` and see all of its comments. - -2. We can do a reverse lookup and query any `Comment` and see which post it comes from. - -3. 
We can use [Derived Field Loaders](/developing/graph-ts/api/#looking-up-derived-entities) to unlock the ability to directly access and manipulate data from virtual relationships in our subgraph mappings. - -## Conclusion - -Use the `@derivedFrom` directive in subgraphs to effectively manage dynamically growing arrays, enhancing indexing efficiency and data retrieval. - -For a more detailed explanation of strategies to avoid large arrays, check out Kevin Jones' blog: [Best Practices in Subgraph Development: Avoiding Large Arrays](https://thegraph.com/blog/improve-subgraph-performance-avoiding-large-arrays/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/grafting-hotfix.mdx b/website/pages/ar/cookbook/grafting-hotfix.mdx deleted file mode 100644 index b7699bf2bc85..000000000000 --- a/website/pages/ar/cookbook/grafting-hotfix.mdx +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Subgraph Best Practice 6 - Use Grafting for Quick Hotfix Deployment ---- - -## TLDR - -Grafting is a powerful feature in subgraph development that allows you to build and deploy new subgraphs while reusing the indexed data from existing ones. - -### نظره عامة - -This feature enables quick deployment of hotfixes for critical issues, eliminating the need to re-index the entire subgraph from scratch. By preserving historical data, grafting minimizes downtime and ensures continuity in data services. 
- -## Benefits of Grafting for Hotfixes - -1. **Rapid Deployment** - - - **Minimize Downtime**: When a subgraph encounters a critical error and stops indexing, grafting enables you to deploy a fix immediately without waiting for re-indexing. - - **Immediate Recovery**: The new subgraph continues from the last indexed block, ensuring that data services remain uninterrupted. - -2. **Data Preservation** - - - **Reuse Historical Data**: Grafting copies the existing data from the base subgraph, so you don’t lose valuable historical records. - - **Consistency**: Maintains data continuity, which is crucial for applications relying on consistent historical data. - -3. **Efficiency** - - **Save Time and Resources**: Avoids the computational overhead of re-indexing large datasets. - - **Focus on Fixes**: Allows developers to concentrate on resolving issues rather than managing data recovery. - -## Best Practices When Using Grafting for Hotfixes - -1. **Initial Deployment Without Grafting** - - - **Start Clean**: Always deploy your initial subgraph without grafting to ensure that it’s stable and functions as expected. - - **Test Thoroughly**: Validate the subgraph’s performance to minimize the need for future hotfixes. - -2. **Implementing the Hotfix with Grafting** - - - **Identify the Issue**: When a critical error occurs, determine the block number of the last successfully indexed event. - - **Create a New Subgraph**: Develop a new subgraph that includes the hotfix. - - **Configure Grafting**: Use grafting to copy data up to the identified block number from the failed subgraph. - - **Deploy Quickly**: Publish the grafted subgraph to restore service as soon as possible. - -3. **Post-Hotfix Actions** - - - **Monitor Performance**: Ensure the grafted subgraph is indexing correctly and the hotfix resolves the issue. - - **Republish Without Grafting**: Once stable, deploy a new version of the subgraph without grafting for long-term maintenance. 
- > Note: Relying on grafting indefinitely is not recommended as it can complicate future updates and maintenance. - - **Update References**: Redirect any services or applications to use the new, non-grafted subgraph. - -4. **Important Considerations** - - **Careful Block Selection**: Choose the graft block number carefully to prevent data loss. - - **Tip**: Use the block number of the last correctly processed event. - - **Use Deployment ID**: Ensure you reference the Deployment ID of the base subgraph, not the Subgraph ID. - - **Note**: The Deployment ID is the unique identifier for a specific subgraph deployment. - - **Feature Declaration**: Remember to declare grafting in the subgraph manifest under features. - -## Example: Deploying a Hotfix with Grafting - -Suppose you have a subgraph tracking a smart contract that has stopped indexing due to a critical error. Here’s how you can use grafting to deploy a hotfix. - -1. **Failed Subgraph Manifest (subgraph.yaml)** - - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: OldSmartContract - network: sepolia - source: - address: '0xOldContractAddress' - abi: Lock - startBlock: 5000000 - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/OldLock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleOldWithdrawal - file: ./src/old-lock.ts - ``` - -2. 
**New Grafted Subgraph Manifest (subgraph.yaml)** - ```yaml - specVersion: 1.0.0 - schema: - file: ./schema.graphql - dataSources: - - kind: ethereum/contract - name: NewSmartContract - network: sepolia - source: - address: '0xNewContractAddress' - abi: Lock - startBlock: 6000001 # Block after the last indexed block - mapping: - kind: ethereum/events - apiVersion: 0.0.7 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts - features: - - grafting - graft: - base: QmBaseDeploymentID # Deployment ID of the failed subgraph - block: 6000000 # Last successfully indexed block - ``` - -**Explanation:** - -- **Data Source Update**: The new subgraph points to 0xNewContractAddress, which may be a fixed version of the smart contract. -- **Start Block**: Set to one block after the last successfully indexed block to avoid reprocessing the error. -- **Grafting Configuration**: - - **base**: Deployment ID of the failed subgraph. - - **block**: Block number where grafting should begin. - -3. **Deployment Steps** - - - **Update the Code**: Implement the hotfix in your mapping scripts (e.g., handleWithdrawal). - - **Adjust the Manifest**: As shown above, update the `subgraph.yaml` with grafting configurations. - - **Deploy the Subgraph**: - - Authenticate with the Graph CLI. - - Deploy the new subgraph using `graph deploy`. - -4. **Post-Deployment** - - **Verify Indexing**: Check that the subgraph is indexing correctly from the graft point. - - **Monitor Data**: Ensure that new data is being captured and the hotfix is effective. - - **Plan for Republish**: Schedule the deployment of a non-grafted version for long-term stability. 
- -## Warnings and Cautions - -While grafting is a powerful tool for deploying hotfixes quickly, there are specific scenarios where it should be avoided to maintain data integrity and ensure optimal performance. - -- **Incompatible Schema Changes**: If your hotfix requires altering the type of existing fields or removing fields from your schema, grafting is not suitable. Grafting expects the new subgraph’s schema to be compatible with the base subgraph’s schema. Incompatible changes can lead to data inconsistencies and errors because the existing data won’t align with the new schema. -- **Significant Mapping Logic Overhauls**: When the hotfix involves substantial modifications to your mapping logic—such as changing how events are processed or altering handler functions—grafting may not function correctly. The new logic might not be compatible with the data processed under the old logic, leading to incorrect data or failed indexing. -- **Deployments to The Graph Network**: Grafting is not recommended for subgraphs intended for The Graph’s decentralized network (mainnet). It can complicate indexing and may not be fully supported by all Indexers, potentially causing unexpected behavior or increased costs. For mainnet deployments, it’s safer to re-index the subgraph from scratch to ensure full compatibility and reliability. - -### Risk Management - -- **Data Integrity**: Incorrect block numbers can lead to data loss or duplication. -- **Testing**: Always test grafting in a development environment before deploying to production. - -## Conclusion - -Grafting is an effective strategy for deploying hotfixes in subgraph development, enabling you to: - -- **Quickly Recover** from critical errors without re-indexing. -- **Preserve Historical Data**, maintaining continuity for applications and users. -- **Ensure Service Availability** by minimizing downtime during critical fixes. 
- -However, it’s important to use grafting judiciously and follow best practices to mitigate risks. After stabilizing your subgraph with the hotfix, plan to deploy a non-grafted version to ensure long-term maintainability. - -## مصادر إضافية - -- **[Grafting Documentation](/cookbook/grafting/)**: Replace a Contract and Keep its History With Grafting -- **[Understanding Deployment IDs](/querying/querying-by-subgraph-id-vs-deployment-id/)**: Learn the difference between Deployment ID and Subgraph ID. - -By incorporating grafting into your subgraph development workflow, you can enhance your ability to respond to issues swiftly, ensuring that your data services remain robust and reliable. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/grafting.mdx b/website/pages/ar/cookbook/grafting.mdx deleted file mode 100644 index 08c347c50a63..000000000000 --- a/website/pages/ar/cookbook/grafting.mdx +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Replace a Contract and Keep its History With Grafting ---- - -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. - -## What is Grafting? - -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- يضيف أو يزيل أنواع الكيانات -- يزيل الصفات من أنواع الكيانات -- يضيف صفات nullable لأنواع الكيانات -- يحول صفات non-nullable إلى صفات nullable -- يضيف قيما إلى enums -- يضيف أو يزيل الواجهات -- يغير للكيانات التي يتم تنفيذ الواجهة لها - -For more information, you can check: - -- [تطعيم(Grafting)](/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) - -In this tutorial, we will be covering a basic use case. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. - -## Important Note on Grafting When Upgrading to the Network - -> **Caution**: It is recommended to not use grafting for subgraphs published to The Graph Network - -### Why Is This Important? - -Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. It is not possible to graft a subgraph from The Graph Network back to Subgraph Studio. - -### Best Practices - -**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. - -**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. - -By adhering to these guidelines, you minimize risks and ensure a smoother migration process. 
- -## Building an Existing Subgraph - -Building subgraphs is an essential part of The Graph, described more in depth [here](/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: - -- [Subgraph example repo](https://github.com/Shiyasmohd/grafting-tutorial) - -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). - -## تعريف Subgraph Manifest - -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: - -```yaml -specVersion: 0.0.4 -schema: - file: ./schema.graphql -dataSources: - - kind: ethereum - name: Lock - network: sepolia - source: - address: '0xb3aabe721794b85fe4e72134795c2f93b4eb7e63' - abi: Lock - startBlock: 5955690 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Withdrawal - abis: - - name: Lock - file: ./abis/Lock.json - eventHandlers: - - event: Withdrawal(uint256,uint256) - handler: handleWithdrawal - file: ./src/lock.ts -``` - -- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract -- The network should correspond to an indexed network being queried. Since we're running on Sepolia testnet, the network is `sepolia` -- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. - -## Grafting Manifest Definition - -Grafting requires adding two new items to the original subgraph manifest: - -```yaml ---- -features: - - grafting # feature name -graft: - base: Qm... 
# subgraph ID of base subgraph - block: 5956000 # block number -``` - -- `features:` is a list of all used [feature names](/developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. - -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting - -## Deploying the Base Subgraph - -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground - -```graphql -{ - withdrawals(first: 5) { - id - amount - when - } -} -``` - -It returns something like this: - -``` -{ - "data": { - "withdrawals": [ - { - "id": "0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d0a000000", - "amount": "0", - "when": "1716394824" - }, - { - "id": "0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc45203000000", - "amount": "0", - "when": "1716394848" - } - ] - } -} -``` - -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. - -## Deploying the Grafting Subgraph - -The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. - -1. Go to [Subgraph Studio](https://thegraph.com/studio/) and create a subgraph on Sepolia testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. 
These are the `block` of the [last event emitted](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in Subgraph Studio. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground - -```graphql -{ - withdrawals(first: 5) { - id - amount - when - } -} -``` - -It should return the following: - -``` -{ - "data": { - "withdrawals": [ - { - "id": "0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d0a000000", - "amount": "0", - "when": "1716394824" - }, - { - "id": "0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc45203000000", - "amount": "0", - "when": "1716394848" - }, - { - "id": "0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af06000000", - "amount": "0", - "when": "1716429732" - } - ] - } -} -``` - -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://sepolia.etherscan.io/tx/0xe8323d21c4f104607b10b0fff9fc24b9612b9488795dea8196b2d5f980d3dc1d) and [Event 2](https://sepolia.etherscan.io/tx/0xea1cee35036f2cacb72f2a336be3e54ab911f5bebd58f23400ebb8ecc5cfc452). The new contract emitted one `Withdrawal` after, [Event 3](https://sepolia.etherscan.io/tx/0x2410475f76a44754bae66d293d14eac34f98ec03a3689cbbb56a716d20b209af). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. - -Congrats! You have successfully grafted a subgraph onto another subgraph. 
- -## مصادر إضافية - -If you want more experience with grafting, here are a few examples for popular contracts: - -- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/config/templates/curve.template.yaml) -- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) -- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3-forks/protocols/uniswap-v3/config/templates/uniswapV3Template.yaml), - -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](/developing/creating-a-subgraph/#data-source-templates) can achieve similar results - -> Note: A lot of material from this article was taken from the previously published [Arweave article](/cookbook/arweave/) diff --git a/website/pages/ar/cookbook/immutable-entities-bytes-as-ids.mdx b/website/pages/ar/cookbook/immutable-entities-bytes-as-ids.mdx deleted file mode 100644 index 541212617f9f..000000000000 --- a/website/pages/ar/cookbook/immutable-entities-bytes-as-ids.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: Subgraph Best Practice 3 - Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs ---- - -## TLDR - -Using Immutable Entities and Bytes for IDs in our `schema.graphql` file [significantly improves ](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/) indexing speed and query performance. - -## Immutable Entities - -To make an entity immutable, we simply add `(immutable: true)` to an entity. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -By making the `Transfer` entity immutable, graph-node is able to process the entity more efficiently, improving indexing speeds and query responsiveness. - -Immutable Entities structures will not change in the future. 
An ideal entity to become an Immutable Entity would be an entity that is directly logging on-chain event data, such as a `Transfer` event being logged as a `Transfer` entity. - -### Under the hood - -Mutable entities have a 'block range' indicating their validity. Updating these entities requires the graph node to adjust the block range of previous versions, increasing database workload. Queries also need filtering to find only live entities. Immutable entities are faster because they are all live and since they won't change, no checks or updates are required while writing, and no filtering is required during queries. - -### When not to use Immutable Entities - -If you have a field like `status` that needs to be modified over time, then you should not make the entity immutable. Otherwise, you should use immutable entities whenever possible. - -## Bytes as IDs - -Every entity requires an ID. In the previous example, we can see that the ID is already of the Bytes type. - -```graphql -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} -``` - -While other types for IDs are possible, such as String and Int8, it is recommended to use the Bytes type for all IDs due to character strings taking twice as much space as Byte strings to store binary data, and comparisons of UTF-8 character strings must take the locale into account which is much more expensive than the bytewise comparison used to compare Byte strings. - -### Reasons to Not Use Bytes as IDs - -1. If entity IDs must be human-readable such as auto-incremented numerical IDs or readable strings, Bytes for IDs should not be used. -2. If integrating a subgraph’s data with another data model that does not use Bytes as IDs, Bytes as IDs should not be used. -3. Indexing and querying performance improvements are not desired. 
- -### Concatenating With Bytes as IDs - -It is a common practice in many subgraphs to use string concatenation to combine two properties of an event into a single ID, such as using `event.transaction.hash.toHex() + "-" + event.logIndex.toString()`. However, as this returns a string, this significantly impedes subgraph indexing and querying performance. - -Instead, we should use the `concatI32()` method to concatenate event properties. This strategy results in a `Bytes` ID that is much more performant. - -```typescript -export function handleTransfer(event: TransferEvent): void { - let entity = new Transfer(event.transaction.hash.concatI32(event.logIndex.toI32())) - entity.from = event.params.from - entity.to = event.params.to - entity.value = event.params.value - - entity.blockNumber = event.block.number - entity.blockTimestamp = event.block.timestamp - entity.transactionHash = event.transaction.hash - - entity.save() -} -``` - -### Sorting With Bytes as IDs - -Sorting using Bytes as IDs is not optimal as seen in this example query and response. - -Query: - -```graphql -{ - transfers(first: 3, orderBy: id) { - id - from - to - value - } -} -``` - -Query response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x00010000", - "from": "0xabcd...", - "to": "0x1234...", - "value": "256" - }, - { - "id": "0x00020000", - "from": "0xefgh...", - "to": "0x5678...", - "value": "512" - }, - { - "id": "0x01000000", - "from": "0xijkl...", - "to": "0x9abc...", - "value": "1" - } - ] - } -} -``` - -The IDs are returned as hex. - -To improve sorting, we should create another field on the entity that is a BigInt. - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! # address - to: Bytes! # address - value: BigInt! # unit256 - tokenId: BigInt! # uint256 -} -``` - -This will allow for sorting to be optimized sequentially. 
- -Query: - -```graphql -{ - transfers(first: 3, orderBy: tokenId) { - id - tokenId - } -} -``` - -Query Response: - -```json -{ - "data": { - "transfers": [ - { - "id": "0x…", - "tokenId": "1" - }, - { - "id": "0x…", - "tokenId": "2" - }, - { - "id": "0x…", - "tokenId": "3" - } - ] - } -} -``` - -## Conclusion - -Using both Immutable Entities and Bytes as IDs has been shown to markedly improve subgraph efficiency. Specifically, tests have highlighted up to a 28% increase in query performance and up to a 48% acceleration in indexing speeds. - -Read more about using Immutable Entities and Bytes as IDs in this blog post by David Lutterkort, a Software Engineer at Edge & Node: [Two Simple Subgraph Performance Improvements](https://thegraph.com/blog/two-simple-subgraph-performance-improvements/). - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/near.mdx b/website/pages/ar/cookbook/near.mdx deleted file mode 100644 index b2f9eaf75feb..000000000000 --- a/website/pages/ar/cookbook/near.mdx +++ /dev/null @@ -1,283 +0,0 @@ ---- -title: بناء Subgraphs على NEAR ---- - -هذا الدليل عبارة عن مقدمة لبناء subgraphs تقوم بفهرسة العقود الذكية على [NEAR blockchain](https://docs.near.org/). - -## ما هو NEAR؟ - -[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/concepts/basics/protocol) for more information. 
- -## ماهي NEAR subgraphs؟ - -يوفر The Graph للمطورين أدوات لمعالجة أحداث blockchain وجعل البيانات الناتجة متاحة بسهولة عبر GraphQL API ، والمعروفة باسم subgraph. أصبح [ Graph Node ](https://github.com/graphprotocol/graph-node) الآن قادرًا على معالجة أحداث NEAR ، مما يعني أن مطوري NEAR يمكنهم الآن إنشاء subgraphs لفهرسة عقودهم الذكية (smart contracts). - -تعتمد الـ Subgraphs على الأحداث ، مما يعني أنها تستمع إلى أحداث on-chain ثم تعالجها. يوجد حاليًا نوعان من المعالجات المدعومة لـ NEAR subgraphs: - -- معالجات الكتل(Block handlers): يتم تشغيلها على كل كتلة جديدة -- معالجات الاستلام (Receipt handlers): يتم تشغيلها في كل مرة يتم فيها تنفيذ رسالة على حساب محدد - -[From the NEAR documentation](https://docs.near.org/build/data-infrastructure/lake-data-structures/receipt): - -> الاستلام (Receipt) هو الكائن الوحيد القابل للتنفيذ في النظام. عندما نتحدث عن "معالجة الإجراء" على منصة NEAR ، فإن هذا يعني في النهاية "تطبيق الاستلامات" في مرحلة ما. - -## بناء NEAR Subgraph - -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. - -`graphprotocol/graph-ts@` هي مكتبة لأنواع خاصة بـ subgraph. - -تطوير NEAR subgraph يتطلب `graph-cli` بإصدار أعلى من `0.23.0` و `graph-ts` بإصدار أعلى من `0.23.0`. - -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. - -هناك ثلاثة جوانب لتعريف الـ subgraph: - -**subgraph.yaml:** الـ subgraph manifest ، وتحديد مصادر البيانات ذات الأهمية ، وكيف يجب أن تتم معالجتها.علما أن NEAR هو `نوع` جديد لمصدر البيانات. - -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph#the-graphql-schema). - -**AssemblyScript Mappings:** [AssemblyScript code](/developing/graph-ts/api) that translates from the event data to the entities defined in your schema. 
NEAR support introduces NEAR-specific data types and new JSON parsing functionality. - -During subgraph development there are two key commands: - -```bash -$ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder -``` - -### تعريف Subgraph Manifest - -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: - -```yaml -specVersion: 0.0.2 -schema: - file: ./src/schema.graphql # link to the schema file -dataSources: - - kind: near - network: near-mainnet - source: - account: app.good-morning.near # This data source will monitor this account - startBlock: 10662188 # Required for NEAR - mapping: - apiVersion: 0.0.5 - language: wasm/assemblyscript - blockHandlers: - - handler: handleNewBlock # the function name in the mapping file - receiptHandlers: - - handler: handleReceipt # the function name in the mapping file - file: ./src/mapping.ts # link to the file with the Assemblyscript mappings -``` - -- NEAR subgraphs يقدم `نوعا ` جديدا من مصدر بيانات (`near`) -- The `network` should correspond to a network on the hosting Graph Node. On Subgraph Studio, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` -- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/concepts/protocol/account-model). This can be an account or a sub-account. -- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. 
The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. - -```yaml -accounts: - prefixes: - - app - - good - suffixes: - - morning.near - - morning.testnet -``` - -مصادر بيانات NEAR تدعم نوعين من المعالجات: - -- `blockHandlers`: يتم تشغيلها على كل كتلة NEAR جديدة. لا يتطلب `source.account`. -- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/tutorials/crosswords/basics/add-functions-call#create-a-subaccount) must be added as independent data sources). - -### تعريف المخطط - -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). - -### AssemblyScript Mappings - -تمت كتابة المعالجات(handlers) الخاصة بمعالجة الأحداث بـ[ AssemblyScript ](https://www.assemblyscript.org/). - -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/graph-ts/api). 
- -```typescript - -class ExecutionOutcome { - gasBurnt: u64, - blockHash: Bytes, - id: Bytes, - logs: Array, - receiptIds: Array, - tokensBurnt: BigInt, - executorId: string, - } - -class ActionReceipt { - predecessorId: string, - receiverId: string, - id: CryptoHash, - signerId: string, - gasPrice: BigInt, - outputDataReceivers: Array, - inputDataIds: Array, - actions: Array, - } - -class BlockHeader { - height: u64, - prevHeight: u64,// Always zero when version < V3 - epochId: Bytes, - nextEpochId: Bytes, - chunksIncluded: u64, - hash: Bytes, - prevHash: Bytes, - timestampNanosec: u64, - randomValue: Bytes, - gasPrice: BigInt, - totalSupply: BigInt, - latestProtocolVersion: u32, - } - -class ChunkHeader { - gasUsed: u64, - gasLimit: u64, - shardId: u64, - chunkHash: Bytes, - prevBlockHash: Bytes, - balanceBurnt: BigInt, - } - -class Block { - author: string, - header: BlockHeader, - chunks: Array, - } - -class ReceiptWithOutcome { - outcome: ExecutionOutcome, - receipt: ActionReceipt, - block: Block, - } -``` - -These types are passed to block & receipt handlers: - -- معالجات الكتلة ستتلقى`Block` -- معالجات الاستلام ستتلقى`ReceiptWithOutcome` - -Otherwise, the rest of the [AssemblyScript API](/developing/graph-ts/api) is available to NEAR subgraph developers during mapping execution. - -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/developing/graph-ts/api#json-api) to allow developers to easily process these logs. - -## نشر NEAR Subgraph - -بمجرد امتلاكك لـ subgraph، فقد حان الوقت لنشره في Graph Node للفهرسة. يمكن نشر NEAR subgraphs في اصدارات Graph Node `>=v0.26.x` (لم يتم وضع علامة(tag) على هذا الإصدار ولم يتم إصداره بعد). 
- -Subgraph Studio and the upgrade Indexer on The Graph Network currently supports indexing NEAR mainnet and testnet in beta, with the following network names: - -- `near-mainnet` -- `near-testnet` - -More information on creating and deploying subgraphs on Subgraph Studio can be found [here](/deploying/deploying-a-subgraph-to-studio). - -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On Subgraph Studio, this can be done from [your Dashboard](https://thegraph.com/studio/): "Create a subgraph". - -بمجرد إنشاء الـ subgraph الخاص بك ، يمكنك نشره باستخدام الأمر `graph deploy`: - -```sh -$ graph create --node # creates a subgraph on a local Graph Node (on Subgraph Studio, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash -``` - -The node configuration will depend on where the subgraph is being deployed. - -### Subgraph Studio - -```sh -graph auth -graph deploy -``` - -### Local Graph Node (based on default configuration) - -```sh -graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 -``` - -بمجرد نشر الـ subgraph الخاص بك ، سيتم فهرسته بواسطة Graph Node. يمكنك التحقق من تقدمه عن طريق الاستعلام عن الـ subgraph نفسه: - -```graphql -{ - _meta { - block { - number - } - } -} -``` - -### Indexing NEAR with a Local Graph Node - -تشغيل Graph Node التي تقوم بفهرسة NEAR لها المتطلبات التشغيلية التالية: - -- NEAR Indexer Framework مع أجهزة Firehose -- مكونات NEAR Firehose -- تكوين Graph Node مع Firehose endpoint - -سوف نقدم المزيد من المعلومات حول تشغيل المكونات أعلاه قريبًا. - -## الاستعلام عن NEAR Subgraph - -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. 
- -## أمثلة على الـ Subgraphs - -Here are some example subgraphs for reference: - -[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) - -[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) - -## الأسئلة الشائعة - -### How does the beta work? - -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! - -### Can a subgraph index both NEAR and EVM chains? - -No, a subgraph can only support data sources from one chain/network. - -### Can subgraphs react to more specific triggers? - -حاليًا ، يتم دعم مشغلات الكتلة(Block) والاستلام(Receipt). نحن نبحث في مشغلات استدعاءات الدوال لحساب محدد. نحن مهتمون أيضًا بدعم مشغلات الأحداث ، بمجرد حصول NEAR على دعم محلي للأحداث. - -### Will receipt handlers trigger for accounts and their sub-accounts? - -If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: - -```yaml -accounts: - suffixes: - - mintbase1.near -``` - -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? - -هذا غير مدعوم. نحن بصدد تقييم ما إذا كانت هذه الميزة مطلوبة للفهرسة. - -### Can I use data source templates in my NEAR subgraph? - -هذا غير مدعوم حاليا. نحن بصدد تقييم ما إذا كانت هذه الميزة مطلوبة للفهرسة. - -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? - -الوظيفة المعلقة ليست مدعومة لـ NEAR subgraphs. 
وفي غضون ذلك ، يمكنك نشر إصدار جديد على subgraph مختلف "مسمى" ، وبعد ذلك عندما تتم مزامنته مع رأس السلسلة ، يمكنك إعادة النشر إلى الـ subgraph الأساسي "المسمى" ، والذي سيستخدم نفس ID النشر الأساسي ، لذلك ستتم مزامنة الـ subgraph الرئيسي على الفور. - -### My question hasn't been answered, where can I get more help building NEAR subgraphs? - -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. - -## المراجع - -- [وثائق مطور NEAR](https://docs.near.org/tutorials/crosswords/basics/set-up-skeleton) diff --git a/website/pages/ar/cookbook/pruning.mdx b/website/pages/ar/cookbook/pruning.mdx deleted file mode 100644 index d86bf50edf42..000000000000 --- a/website/pages/ar/cookbook/pruning.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Subgraph Best Practice 1 - Improve Query Speed with Subgraph Pruning ---- - -## TLDR - -[Pruning](/developing/creating-a-subgraph/#prune) removes archival entities from the subgraph’s database up to a given block, and removing unused entities from a subgraph’s database will improve a subgraph’s query performance, often dramatically. Using `indexerHints` is an easy way to prune a subgraph. - -## How to Prune a Subgraph With `indexerHints` - -Add a section called `indexerHints` in the manifest. - -`indexerHints` has three `prune` options: - -- `prune: auto`: Retains the minimum necessary history as set by the Indexer, optimizing query performance. This is the generally recommended setting and is the default for all subgraphs created by `graph-cli` >= 0.66.0. -- `prune: `: Sets a custom limit on the number of historical blocks to retain. -- `prune: never`: No pruning of historical data; retains the entire history and is the default if there is no `indexerHints` section. 
`prune: never` should be selected if [Time Travel Queries](/querying/graphql-api/#time-travel-queries) are desired. - -We can add `indexerHints` to our subgraphs by updating our `subgraph.yaml`: - -```yaml -specVersion: 1.0.0 -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Contract - network: mainnet -``` - -## Important Considerations - -- If [Time Travel Queries](/querying/graphql-api/#time-travel-queries) are desired as well as pruning, pruning must be performed accurately to retain Time Travel Query functionality. Due to this, it is generally not recommended to use `indexerHints: prune: auto` with Time Travel Queries. Instead, prune using `indexerHints: prune: ` to accurately prune to a block height that preserves the historical data required by Time Travel Queries, or use `prune: never` to maintain all data. - -- It is not possible to [graft](/cookbook/grafting/) at a block height that has been pruned. If grafting is routinely performed and pruning is desired, it is recommended to use `indexerHints: prune: ` that will accurately retain a set number of blocks (e.g., enough for six months). - -## Conclusion - -Pruning using `indexerHints` is a best practice for subgraph development, offering significant query performance improvements. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. [Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. 
[Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx b/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx deleted file mode 100644 index 8a7998c325f8..000000000000 --- a/website/pages/ar/cookbook/substreams-powered-subgraphs.mdx +++ /dev/null @@ -1,226 +0,0 @@ ---- -title: Substreams-powered subgraphs ---- - -[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. - -## Requirements - -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/documentation/consume/installing-the-cli), and the latest version of Graph CLI (>=0.52.0): - -``` -npm install -g @graphprotocol/graph-cli -``` - -## Get the cookbook - -> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). - -``` -graph init --from-example substreams-powered-subgraph -``` - -## Defining a Substreams package - -A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. 
- -The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): - -```proto -syntax = "proto3"; - -package example; - -message Contracts { - repeated Contract contracts = 1; -} - -message Contract { - string address = 1; - uint64 blockNumber = 2; - string timestamp = 3; - uint64 ordinal = 4; -} -``` - -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: - -```rust -#[substreams::handlers::map] -fn map_contract(block: eth::v2::Block) -> Result { - let contracts = block - .transactions() - .flat_map(|tx| { - tx.calls - .iter() - .filter(|call| !call.state_reverted) - .filter(|call| call.call_type == eth::v2::CallType::Create as i32) - .map(|call| Contract { - address: format!("0x{}", Hex(&call.address)), - block_number: block.number, - timestamp: block.timestamp_seconds().to_string(), - ordinal: tx.begin_ordinal, - }) - }) - .collect(); - Ok(Contracts { contracts }) -} -``` - -A Substreams package can be used by a subgraph as long as it has a module which outputs compatible entity changes. The example Substreams package has an additional `graph_out` module in `lib.rs` which returns a `substreams_entity_change::pb::entity::EntityChanges` output, which can be processed by Graph Node. - -> The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. 
- -```rust -#[substreams::handlers::map] -pub fn graph_out(contracts: Contracts) -> Result { - // hash map of name to a table - let mut tables = Tables::new(); - - for contract in contracts.contracts.into_iter() { - tables - .create_row("Contract", contract.address) - .set("timestamp", contract.timestamp) - .set("blockNumber", contract.block_number); - } - - Ok(tables.to_entity_changes()) -} -``` - -These types and modules are pulled together in `substreams.yaml`: - -```yaml -specVersion: v0.1.0 -package: - name: 'substreams_test' # the name to be used in the .spkg - version: v1.0.1 # the version to use when creating the .spkg - -imports: # dependencies - entity: https://github.com/streamingfast/substreams-entity-change/releases/download/v0.2.1/substreams-entity-change-v0.2.1.spkg - -protobuf: # specifies custom types for use by Substreams modules - files: - - example.proto - importPaths: - - ./proto - -binaries: - default: - type: wasm/rust-v1 - file: ./target/wasm32-unknown-unknown/release/substreams.wasm - -modules: # specify modules with their inputs and outputs. 
- - name: map_contract - kind: map - inputs: - - source: sf.ethereum.type.v2.Block - output: - type: proto:test.Contracts - - - name: graph_out - kind: map - inputs: - - map: map_contract - output: - type: proto:substreams.entity.v1.EntityChanges # this type can be consumed by Graph Node -``` - -You can check the overall "flow" from a Block, to `map_contract` to `graph_out` by running `substreams graph`: - -```mermaid -graph TD; - map_contract[map: map_contract]; - sf.ethereum.type.v2.Block[source: sf.ethereum.type.v2.Block] --> map_contract; - graph_out[map: graph_out]; - map_contract --> graph_out; -``` - -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: - -```bash -yarn substreams:protogen # generates types in /src/pb -yarn substreams:build # builds the substreams -yarn substreams:package # packages the substreams in a .spkg file - -# alternatively, yarn substreams:prepare calls all of the above commands -``` - -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands - -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. - -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. - -## Defining a Substreams-powered subgraph - -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. - -This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). 
The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. - -> Currently, Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). - -```yaml -specVersion: 0.0.4 -description: Ethereum Contract Tracking Subgraph (powered by Substreams) -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: schema.graphql -dataSources: - - kind: substreams - name: substream_test - network: mainnet - source: - package: - moduleName: graph_out - file: substreams-test-v1.0.1.spkg - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.5 -``` - -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. - -```graphql -type Contract @entity { - id: ID! - - "The timestamp when the contract was deployed" - timestamp: String! - - "The block number of the contract deployment" - blockNumber: BigInt! -} -``` - -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. - -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). - -```bash -yarn install # install graph-cli -yarn subgraph:build # build the subgraph -yarn subgraph:deploy # deploy the subgraph -``` - -That's it! You have built and deployed a Substreams-powered subgraph. - -## Serving Substreams-powered subgraphs - -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. 
These providers can be configured via a `config.toml` file: - -```toml -[chains.mainnet] -shard = "main" -protocol = "ethereum" -provider = [ - { label = "substreams-provider-mainnet", - details = { type = "substreams", - url = "https://mainnet-substreams-url.grpc.substreams.io/", - token = "exampletokenhere" }}, - { label = "firehose-provider-mainnet", - details = { type = "firehose", - url = "https://mainnet-firehose-url.grpc.firehose.io/", - token = "exampletokenhere" }}, -] -``` diff --git a/website/pages/ar/cookbook/timeseries.mdx b/website/pages/ar/cookbook/timeseries.mdx deleted file mode 100644 index a6402c800725..000000000000 --- a/website/pages/ar/cookbook/timeseries.mdx +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Subgraph Best Practice 5 - Simplify and Optimize with Timeseries and Aggregations ---- - -## TLDR - -Leveraging the new time-series and aggregations feature in subgraphs can significantly enhance both indexing speed and query performance. - -## نظره عامة - -Timeseries and aggregations reduce data processing overhead and accelerate queries by offloading aggregation computations to the database and simplifying mapping code. This approach is particularly effective when handling large volumes of time-based data. - -## Benefits of Timeseries and Aggregations - -1. Improved Indexing Time - -- Less Data to Load: Mappings handle less data since raw data points are stored as immutable timeseries entities. -- Database-Managed Aggregations: Aggregations are automatically computed by the database, reducing the workload on the mappings. - -2. Simplified Mapping Code - -- No Manual Calculations: Developers no longer need to write complex aggregation logic in mappings. -- Reduced Complexity: Simplifies code maintenance and minimizes the potential for errors. - -3. Dramatically Faster Queries - -- Immutable Data: All timeseries data is immutable, enabling efficient storage and retrieval. 
-- Efficient Data Separation: Aggregates are stored separately from raw timeseries data, allowing queries to process significantly less data—often several orders of magnitude less. - -### Important Considerations - -- Immutable Data: Timeseries data cannot be altered once written, ensuring data integrity and simplifying indexing. -- Automatic ID and Timestamp Management: id and timestamp fields are automatically managed by graph-node, reducing potential errors. -- Efficient Data Storage: By separating raw data from aggregates, storage is optimized, and queries run faster. - -## How to Implement Timeseries and Aggregations - -### Defining Timeseries Entities - -A timeseries entity represents raw data points collected over time. It is defined with the `@entity(timeseries: true)` annotation. Key requirements: - -- Immutable: Timeseries entities are always immutable. -- Mandatory Fields: - - `id`: Must be of type `Int8!` and is auto-incremented. - - `timestamp`: Must be of type `Timestamp!` and is automatically set to the block timestamp. - -Example: - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} -``` - -### Defining Aggregation Entities - -An aggregation entity computes aggregated values from a timeseries source. It is defined with the `@aggregation` annotation. Key components: - -- Annotation Arguments: - - `intervals`: Specifies time intervals (e.g., `["hour", "day"]`). - -Example: - -```graphql -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -In this example, Stats aggregates the price field from Data over hourly and daily intervals, computing the sum. - -### Querying Aggregated Data - -Aggregations are exposed via query fields that allow filtering and retrieval based on dimensions and time intervals. 
- -Example: - -```graphql -{ - tokenStats( - interval: "hour" - where: { token: "0x1234567890abcdef", timestamp_gte: "1704164640000000", timestamp_lt: "1704251040000000" } - ) { - id - timestamp - token { - id - } - totalVolume - priceUSD - count - } -} -``` - -### Using Dimensions in Aggregations - -Dimensions are non-aggregated fields used to group data points. They enable aggregations based on specific criteria, such as a token in a financial application. - -Example: - -### Timeseries Entity - -```graphql -type TokenData @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - token: Token! - amount: BigDecimal! - priceUSD: BigDecimal! -} -``` - -### Aggregation Entity with Dimension - -```graphql -type TokenStats @aggregation(intervals: ["hour", "day"], source: "TokenData") { - id: Int8! - timestamp: Timestamp! - token: Token! - totalVolume: BigDecimal! @aggregate(fn: "sum", arg: "amount") - priceUSD: BigDecimal! @aggregate(fn: "last", arg: "priceUSD") - count: Int8! @aggregate(fn: "count", cumulative: true) -} -``` - -- Dimension Field: token groups the data, so aggregates are computed per token. -- Aggregates: - - totalVolume: Sum of amount. - - priceUSD: Last recorded priceUSD. - - count: Cumulative count of records. - -### Aggregation Functions and Expressions - -Supported aggregation functions: - -- sum -- count -- min -- max -- first -- last - -### The arg in @aggregate can be - -- A field name from the timeseries entity. -- An expression using fields and constants. 
- -### Examples of Aggregation Expressions - -- Sum Token Value: @aggregate(fn: "sum", arg: "priceUSD \_ amount") -- Maximum Positive Amount: @aggregate(fn: "max", arg: "greatest(amount0, amount1, 0)") -- Conditional Sum: @aggregate(fn: "sum", arg: "case when amount0 > amount1 then amount0 else 0 end") - -Supported operators and functions include basic arithmetic (+, -, \_, /), comparison operators, logical operators (and, or, not), and SQL functions like greatest, least, coalesce, etc. - -### Query Parameters - -- interval: Specifies the time interval (e.g., "hour"). -- where: Filters based on dimensions and timestamp ranges. -- timestamp_gte / timestamp_lt: Filters for start and end times (microseconds since epoch). - -### Notes - -- Sorting: Results are automatically sorted by timestamp and id in descending order. -- Current Data: An optional current argument can include the current, partially filled interval. - -### Conclusion - -Implementing timeseries and aggregations in subgraphs is a best practice for projects dealing with time-based data. This approach: - -- Enhances Performance: Speeds up indexing and querying by reducing data processing overhead. -- Simplifies Development: Eliminates the need for manual aggregation logic in mappings. -- Scales Efficiently: Handles large volumes of data without compromising on speed or responsiveness. - -By adopting this pattern, developers can build more efficient and scalable subgraphs, providing faster and more reliable data access to end-users. To learn more about implementing timeseries and aggregations, refer to the [Timeseries and Aggregations Readme](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) and consider experimenting with this feature in your subgraphs. - -## Subgraph Best Practices 1-6 - -1. [Improve Query Speed with Subgraph Pruning](/cookbook/pruning/) - -2. [Improve Indexing and Query Responsiveness by Using @derivedFrom](/cookbook/derivedfrom/) - -3. 
[Improve Indexing and Query Performance by Using Immutable Entities and Bytes as IDs](/cookbook/immutable-entities-bytes-as-ids/) - -4. [Improve Indexing Speed by Avoiding `eth_calls`](/cookbook/avoid-eth-calls/) - -5. [Simplify and Optimize with Timeseries and Aggregations](/cookbook/timeseries/) - -6. [Use Grafting for Quick Hotfix Deployment](/cookbook/grafting-hotfix/) diff --git a/website/pages/ar/cookbook/transfer-to-the-graph.mdx b/website/pages/ar/cookbook/transfer-to-the-graph.mdx deleted file mode 100644 index 5c0446fa7fda..000000000000 --- a/website/pages/ar/cookbook/transfer-to-the-graph.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Tranfer to The Graph ---- - -Quickly upgrade your subgraphs from any platform to [The Graph's decentralized network](https://thegraph.com/networks/). - -## Benefits of Switching to The Graph - -- Use the same subgraph that your apps already use with zero-downtime migration. -- Increase reliability from a global network supported by 100+ Indexers. -- Receive lightning-fast support for subgraphs 24/7, with an on-call engineering team. - -## Upgrade Your Subgraph to The Graph in 3 Easy Steps - -1. [Set Up Your Studio Environment](/cookbook/transfer-to-the-graph/#1-set-up-your-studio-environment) -2. [Deploy Your Subgraph to Studio](/cookbook/transfer-to-the-graph/#2-deploy-your-subgraph-to-studio) -3. [Publish to The Graph Network](/cookbook/transfer-to-the-graph/#publish-your-subgraph-to-the-graphs-decentralized-network) - -## 1. Set Up Your Studio Environment - -### Create a Subgraph in Subgraph Studio - -- Go to [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. -- Click "Create a Subgraph". It is recommended to name the subgraph in Title Case: "Subgraph Name Chain Name". - -> Note: After publishing, the subgraph name will be editable but requires on-chain action each time, so name it properly. 
- -### Install the Graph CLI⁠ - -You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm` or `pnpm`) installed to use the Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. - -On your local machine, run the following command: - -Using [npm](https://www.npmjs.com/): - -```sh -npm install -g @graphprotocol/graph-cli@latest -``` - -Use the following command to create a subgraph in Studio using the CLI: - -```sh -graph init --product subgraph-studio -``` - -### Authenticate Your Subgraph - -In The Graph CLI, use the auth command seen in Subgraph Studio: - -```sh -graph auth -``` - -## 2. Deploy Your Subgraph to Studio - -If you have your source code, you can easily deploy it to Studio. If you don't have it, here's a quick way to deploy your subgraph. - -In The Graph CLI, run the following command: - -```sh -graph deploy --ipfs-hash - -``` - -> **Note:** Every subgraph has an IPFS hash (Deployment ID), which looks like this: "Qmasdfad...". To deploy simply use this **IPFS hash**. You’ll be prompted to enter a version (e.g., v0.0.1). - -## 3. Publish Your Subgraph to The Graph Network - -![publish button](/img/publish-sub-transfer.png) - -### Query Your Subgraph - -> To attract about 3 indexers to query your subgraph, it’s recommended to curate at least 3,000 GRT. To learn more about curating, check out [Curating](/network/curating/) on The Graph. - -You can start [querying](/querying/querying-the-graph/) any subgraph by sending a GraphQL query into the subgraph’s query URL endpoint, which is located at the top of its Explorer page in Subgraph Studio. 
- -#### مثال - -[CryptoPunks Ethereum subgraph](https://thegraph.com/explorer/subgraphs/HdVdERFUe8h61vm2fDyycHgxjsde5PbB832NHgJfZNqK) by Messari: - -![Query URL](/img/cryptopunks-screenshot-transfer.png) - -The query URL for this subgraph is: - -```sh -https://gateway-arbitrum.network.thegraph.com/api/`**your-own-api-key**`/subgraphs/id/HdVdERFUe8h61vm2fDyycgxjsde5PbB832NHgJfZNqK -``` - -Now, you simply need to fill in **your own API Key** to start sending GraphQL queries to this endpoint. - -### Getting your own API Key - -You can create API Keys in Subgraph Studio under the “API Keys” menu at the top of the page: - -![API keys](/img/Api-keys-screenshot.png) - -### Monitor Subgraph Status - -Once you upgrade, you can access and manage your subgraphs in [Subgraph Studio](https://thegraph.com/studio/) and explore all subgraphs in [The Graph Explorer](https://thegraph.com/networks/). - -### مصادر إضافية - -- To quickly create and publish a new subgraph, check out the [Quick Start](/quick-start/). -- To explore all the ways you can optimize and customize your subgraph for a better performance, read more about [creating a subgraph here](/developing/creating-a-subgraph/). diff --git a/website/pages/ar/deploying/_meta.js b/website/pages/ar/deploying/_meta.js deleted file mode 100644 index 3d7abedc4d57..000000000000 --- a/website/pages/ar/deploying/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/deploying/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/deploying/deploy-using-subgraph-studio.mdx b/website/pages/ar/deploying/deploy-using-subgraph-studio.mdx deleted file mode 100644 index 3e357875b406..000000000000 --- a/website/pages/ar/deploying/deploy-using-subgraph-studio.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Deploy Using Subgraph Studio ---- - -Learn how to deploy your subgraph to Subgraph Studio. - -> Note: When you deploy a subgraph, you push it to Subgraph Studio, where you'll be able to test it. 
It's important to remember that deploying is not the same as publishing. When you publish a subgraph, you're publishing it on-chain. - -## Subgraph Studio Overview - -In [Subgraph Studio](https://thegraph.com/studio/), you can do the following: - -- View a list of subgraphs you've created -- Manage, view details, and visualize the status of a specific subgraph -- إنشاء وإدارة مفاتيح API الخاصة بك لـ subgraphs محددة -- Restrict your API keys to specific domains and allow only certain Indexers to query with them -- Create your subgraph -- Deploy your subgraph using The Graph CLI -- Test your subgraph in the playground environment -- Integrate your subgraph in staging using the development query URL -- Publish your subgraph to The Graph Network -- Manage your billing - -## Install The Graph CLI - -Before deploying, you must install The Graph CLI. - -You must have [Node.js](https://nodejs.org/) and a package manager of your choice (`npm`, `yarn` or `pnpm`) installed to use The Graph CLI. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. - -### Install with yarn - -```bash -yarn global add @graphprotocol/graph-cli -``` - -### Install with npm - -```bash -npm install -g @graphprotocol/graph-cli -``` - -## البدء - -1. Open [Subgraph Studio](https://thegraph.com/studio/). -2. Connect your wallet to sign in. - - You can do this via MetaMask, Coinbase Wallet, WalletConnect, or Safe. -3. After you sign in, your unique deploy key will be displayed on your subgraph details page. - - The deploy key allows you to publish your subgraphs or manage your API keys and billing. It is unique but can be regenerated if you think it has been compromised. - -> Important: You need an API key to query subgraphs - -### How to Create a Subgraph in Subgraph Studio - - - -> For additional written detail, review the [Quick Start](/quick-start/). 
- -### توافق الـ Subgraph مع شبكة The Graph - -In order to be supported by Indexers on The Graph Network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- يجب ألا تستخدم أيًا من الميزات التالية: - - ipfs.cat & ipfs.map - - أخطاء غير فادحة - - تطعيم(Grafting) - -## Initialize Your Subgraph - -Once your subgraph has been created in Subgraph Studio, you can initialize its code through the CLI using this command: - -```bash -graph init -``` - -You can find the `` value on your subgraph details page in Subgraph Studio, see image below: - -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) - -After running `graph init`, you will be asked to input the contract address, network, and an ABI that you want to query. This will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. - -## Graph Auth - -Before you can deploy your subgraph to Subgraph Studio, you need to log into your account within the CLI. To do this, you will need your deploy key, which you can find under your subgraph details page. - -Then, use the following command to authenticate from the CLI: - -```bash -graph auth -``` - -## Deploying a Subgraph - -Once you are ready, you can deploy your subgraph to Subgraph Studio. - -> Deploying a subgraph with the CLI pushes it to the Studio, where you can test it and update the metadata. This action won't publish your subgraph to the decentralized network. - -Use the following CLI command to deploy your subgraph: - -```bash -graph deploy -``` - -After running this command, the CLI will ask for a version label. - -- It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as `v1`, `version1`, or `asdf`. 
-- The labels you create will be visible in Graph Explorer and can be used by curators to decide if they want to signal on a specific version or not, so choose them wisely. - -## Testing Your Subgraph - -After deploying, you can test your subgraph (either in Subgraph Studio or in your own app, with the deployment query URL), deploy another version, update the metadata, and publish to [Graph Explorer](https://thegraph.com/explorer) when you are ready. - -Use Subgraph Studio to check the logs on the dashboard and look for any errors with your subgraph. - -## Publish Your Subgraph - -In order to publish your subgraph successfully, review [publishing a subgraph](/publishing/publishing-a-subgraph/). - -## Versioning Your Subgraph with the CLI - -If you want to update your subgraph, you can do the following: - -- You can deploy a new version to Studio using the CLI (it will only be private at this point). -- Once you're happy with it, you can publish your new deployment to [Graph Explorer](https://thegraph.com/explorer). -- This action will create a new version of your subgraph that Curators can start signaling on and Indexers can index. - -You can also update your subgraph's metadata without publishing a new version. You can update your subgraph details in Studio (under the profile picture, name, description, etc.) by checking an option called **Update Details** in [Graph Explorer](https://thegraph.com/explorer). If this is checked, an on-chain transaction will be generated that updates subgraph details in Explorer without having to publish a new version with a new deployment. - -> Note: There are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, you must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if Curators have not signaled on it. For more information, please read more [here](/network/curating/). 
- -## الأرشفة التلقائية لإصدارات الـ Subgraph - -Whenever you deploy a new subgraph version in Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in Subgraph Studio. - -> Note: Previous versions of non-published subgraphs deployed to Studio will be automatically archived. - -![Subgraph Studio - Unarchive](/img/Unarchive.png) diff --git a/website/pages/ar/deploying/multiple-networks.mdx b/website/pages/ar/deploying/multiple-networks.mdx deleted file mode 100644 index dc2b8e533430..000000000000 --- a/website/pages/ar/deploying/multiple-networks.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: Deploying a Subgraph to Multiple Networks ---- - -This page explains how to deploy a subgraph to multiple networks. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). If you have not created a subgraph already, see [Creating a subgraph](/developing/creating-a-subgraph). - -## Deploying the subgraph to multiple networks - -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. - -### Using `graph-cli` - -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: - -```sh -Options: - - ... - --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") -``` - -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. - -> Note: The `init` command will now auto-generate a `networks.json` based on the provided information. 
You will then be able to update existing or add additional networks. - -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: - -```json -{ - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) - }, - "dataSource2": { - "address": "0x123...", - "startBlock": 123444 - } - }, - "network2": { - "dataSource1": { - "address": "0x987...", - "startBlock": 123 - }, - "dataSource2": { - "address": "0xxyz..", - "startBlock": 456 - } - }, - ... -} -``` - -> Note: You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. - -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `sepolia` networks, and this is your `subgraph.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x123...' - abi: Gravity - mapping: - kind: ethereum/events -``` - -This is what your networks config file should look like: - -```json -{ - "mainnet": { - "Gravity": { - "address": "0x123..." - } - }, - "sepolia": { - "Gravity": { - "address": "0xabc..." - } - } -} -``` - -Now we can run one of the following commands: - -```sh -# Using default networks.json file -yarn build --network sepolia - -# Using custom named file -yarn build --network sepolia --network-file path/to/config -``` - -The `build` command will update your `subgraph.yaml` with the `sepolia` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: sepolia - source: - address: '0xabc...' 
- abi: Gravity - mapping: - kind: ethereum/events -``` - -Now you are ready to `yarn deploy`. - -> Note: As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: - -```sh -# Using default networks.json file -yarn deploy --network sepolia - -# Using custom named file -yarn deploy --network sepolia --network-file path/to/config -``` - -### Using subgraph.yaml template - -One way to parameterize aspects like contract addresses using older `graph-cli` versions is to generate parts of it with a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). - -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Sepolia using different contract addresses. You could then define two config files providing the addresses for each network: - -```json -{ - "network": "mainnet", - "address": "0x123..." -} -``` - -and - -```json -{ - "network": "sepolia", - "address": "0xabc..." -} -``` - -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: - -```yaml -# ... -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - network: {{network}} - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - address: '{{address}}' - abi: Gravity - mapping: - kind: ethereum/events -``` - -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: - -```json -{ - ... - "scripts": { - ... - "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml", - "prepare:sepolia": "mustache config/sepolia.json subgraph.template.yaml > subgraph.yaml" - }, - "devDependencies": { - ... 
- "mustache": "^3.1.0" - } -} -``` - -To deploy this subgraph for mainnet or Sepolia you would now simply run one of the two following commands: - -```sh -# Mainnet: -yarn prepare:mainnet && yarn deploy - -# Sepolia: -yarn prepare:sepolia && yarn deploy -``` - -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). - -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. - -## Subgraph Studio subgraph archive policy - -A subgraph version in Studio is archived if and only if it meets the following criteria: - -- The version is not published to the network (or pending publish) -- The version was created 45 or more days ago -- The subgraph hasn't been queried in 30 days - -In addition, when a new version is deployed, if the subgraph has not been published, then the N-2 version of the subgraph is archived. - -Every subgraph affected with this policy has an option to bring the version in question back. - -## Checking subgraph health - -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. 
- -Graph Node exposes a GraphQL endpoint which you can query to check the status of your subgraph. On the hosted service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: - -```graphql -{ - indexingStatusForCurrentVersion(subgraphName: "org/subgraph") { - synced - health - fatalError { - message - block { - number - hash - } - handler - } - chains { - chainHeadBlock { - number - } - latestBlock { - number - } - } - } -} -``` - -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. 
diff --git a/website/pages/ar/developing/_meta.js b/website/pages/ar/developing/_meta.js deleted file mode 100644 index 48d6b89bb3fe..000000000000 --- a/website/pages/ar/developing/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/developing/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/developing/creating-a-subgraph/_meta.js b/website/pages/ar/developing/creating-a-subgraph/_meta.js deleted file mode 100644 index a904468b50a2..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../../en/developing/creating-a-subgraph/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/developing/creating-a-subgraph/advanced.mdx b/website/pages/ar/developing/creating-a-subgraph/advanced.mdx deleted file mode 100644 index 04984ebb31a6..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/advanced.mdx +++ /dev/null @@ -1,555 +0,0 @@ ---- -title: Advance Subgraph Features ---- - -## نظره عامة - -Add and implement advanced subgraph features to enhanced your subgraph's built. - -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: - -| Feature | Name | -| ---------------------------------------------------- | ---------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | - -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - fullTextSearch - - nonFatalErrors -dataSources: ... 
-``` - -> Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. - -## Timeseries and Aggregations - -Timeseries and aggregations enable your subgraph to track statistics like daily average price, hourly total transfers, etc. - -This feature introduces two new types of subgraph entity. Timeseries entities record data points with timestamps. Aggregation entities perform pre-declared calculations on the Timeseries data points on an hourly or daily basis, then store the results for easy access via GraphQL. - -### Example Schema - -```graphql -type Data @entity(timeseries: true) { - id: Int8! - timestamp: Timestamp! - price: BigDecimal! -} - -type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { - id: Int8! - timestamp: Timestamp! - sum: BigDecimal! @aggregate(fn: "sum", arg: "price") -} -``` - -### Defining Timeseries and Aggregations - -Timeseries entities are defined with `@entity(timeseries: true)` in schema.graphql. Every timeseries entity must have a unique ID of the int8 type, a timestamp of the Timestamp type, and include data that will be used for calculation by aggregation entities. These Timeseries entities can be saved in regular trigger handlers, and act as the “raw data” for the Aggregation entities. - -Aggregation entities are defined with `@aggregation` in schema.graphql. Every aggregation entity defines the source from which it will gather data (which must be a Timeseries entity), sets the intervals (e.g., hour, day), and specifies the aggregation function it will use (e.g., sum, count, min, max, first, last). Aggregation entities are automatically calculated on the basis of the specified source at the end of the required interval. - -#### Available Aggregation Intervals - -- `hour`: sets the timeseries period every hour, on the hour. -- `day`: sets the timeseries period every day, starting and ending at 00:00. 
- -#### Available Aggregation Functions - -- `sum`: Total of all values. -- `count`: Number of values. -- `min`: Minimum value. -- `max`: Maximum value. -- `first`: First value in the period. -- `last`: Last value in the period. - -#### Example Aggregations Query - -```graphql -{ - stats(interval: "hour", where: { timestamp_gt: 1704085200 }) { - id - timestamp - sum - } -} -``` - -Note: - -To use Timeseries and Aggregations, a subgraph must have a spec version ≥1.1.0. Note that this feature might undergo significant changes that could affect backward compatibility. - -[Read more](https://github.com/graphprotocol/graph-node/blob/master/docs/aggregations.md) about Timeseries and Aggregations. - -## أخطاء غير فادحة - -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. - -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. - -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -features: - - nonFatalErrors - ... -``` - -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. 
It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: - -```graphql -foos(first: 100, subgraphError: allow) { - id -} - -_meta { - hasIndexingErrors -} -``` - -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: - -```graphql -"data": { - "foos": [ - { - "id": "0xdead" - } - ], - "_meta": { - "hasIndexingErrors": true - } -}, -"errors": [ - { - "message": "indexing_error" - } -] -``` - -## IPFS/Arweave File Data Sources - -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. - -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. - -### نظره عامة - -Rather than fetching files "in line" during handler execution, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. - -This is similar to the [existing data source templates](/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. - -> This replaces the existing `ipfs.cat` API - -### Upgrade guide - -#### Update `graph-ts` and `graph-cli` - -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 - -#### Add a new entity type which will be updated when files are found - -File data sources cannot access or update chain-based entities, but must update file specific entities. - -This may mean splitting out fields from existing entities into separate entities, linked together. - -Original combined entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! 
- tokenURI: String! - externalURL: String! - ipfsURI: String! - image: String! - name: String! - description: String! - type: String! - updatedAtTimestamp: BigInt - owner: User! -} -``` - -New, split entity: - -```graphql -type Token @entity { - id: ID! - tokenID: BigInt! - tokenURI: String! - ipfsURI: TokenMetadata - updatedAtTimestamp: BigInt - owner: String! -} - -type TokenMetadata @entity { - id: ID! - image: String! - externalURL: String! - name: String! - description: String! -} -``` - -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! - -> You can use [nested filters](/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. - -#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` - -This is the data source which will be spawned when a file of interest is identified. - -```yaml -templates: - - name: TokenMetadata - kind: file/ipfs - mapping: - apiVersion: 0.0.7 - language: wasm/assemblyscript - file: ./src/mapping.ts - handler: handleMetadata - entities: - - TokenMetadata - abis: - - name: Token - file: ./abis/Token.json -``` - -> Currently `abis` are required, though it is not possible to call contracts from within file data sources - -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#limitations) for more details. - -#### Create a new handler to process files - -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](/developing/graph-ts/api/#json-api)). 
- -The CID of the file as a readable string can be accessed via the `dataSource` as follows: - -```typescript -const cid = dataSource.stringParam() -``` - -Example handler: - -```typescript -import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' -import { TokenMetadata } from '../generated/schema' - -export function handleMetadata(content: Bytes): void { - let tokenMetadata = new TokenMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - if (value) { - const image = value.get('image') - const name = value.get('name') - const description = value.get('description') - const externalURL = value.get('external_url') - - if (name && image && description && externalURL) { - tokenMetadata.name = name.toString() - tokenMetadata.image = image.toString() - tokenMetadata.externalURL = externalURL.toString() - tokenMetadata.description = description.toString() - } - - tokenMetadata.save() - } -} -``` - -#### Spawn file data sources when required - -You can now create file data sources during execution of chain-based handlers: - -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave - -For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). - -For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/arweave-node-server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Irys (previously Bundlr), and Graph Node can also fetch files based on [Irys manifests](https://docs.irys.xyz/overview/gateways#indexing). - -Example: - -```typescript -import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' - -const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. - -export function handleTransfer(event: TransferEvent): void { - let token = Token.load(event.params.tokenId.toString()) - if (!token) { - token = new Token(event.params.tokenId.toString()) - token.tokenID = event.params.tokenId - - token.tokenURI = '/' + event.params.tokenId.toString() + '.json' - const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" - - token.ipfsURI = tokenIpfsHash - - TokenMetadataTemplate.create(tokenIpfsHash) - } - - token.updatedAtTimestamp = event.block.timestamp - token.owner = event.params.to.toHexString() - token.save() -} -``` - -This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. - -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. - -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file - -Congratulations, you are using file data sources! - -#### Deploying your subgraphs - -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. 
- -#### Limitations - -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: - -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers - -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! - -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. - -#### Best practices - -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. - -You can use [DataSource context](/developing/graph-ts/api/#entity-and-datasourcecontext) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. - -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. - -> We are working to improve the above recommendation, so queries only return the "most recent" version - -#### Known issues - -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. 
- -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-node/issues/4309)). Workaround is to create file data source handlers in a dedicated file. - -#### Examples - -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) - -#### المراجع - -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) - -## Indexed Argument Filters / Topic Filters - -> **Requires**: [SpecVersion](#specversion-releases) >= `1.2.0` - -Topic filters, also known as indexed argument filters, are a powerful feature in subgraphs that allow users to precisely filter blockchain events based on the values of their indexed arguments. - -- These filters help isolate specific events of interest from the vast stream of events on the blockchain, allowing subgraphs to operate more efficiently by focusing only on relevant data. - -- This is useful for creating personal subgraphs that track specific addresses and their interactions with various smart contracts on the blockchain. - -### How Topic Filters Work - -When a smart contract emits an event, any arguments that are marked as indexed can be used as filters in a subgraph's manifest. This allows the subgraph to listen selectively for events that match these indexed arguments. - -- The event's first indexed argument corresponds to `topic1`, the second to `topic2`, and so on, up to `topic3`, since the Ethereum Virtual Machine (EVM) allows up to three indexed arguments per event. 
- -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -contract Token { - // Event declaration with indexed parameters for addresses - event Transfer(address indexed from, address indexed to, uint256 value); - - // Function to simulate transferring tokens - function transfer(address to, uint256 value) public { - // Emitting the Transfer event with from, to, and value - emit Transfer(msg.sender, to, value); - } -} -``` - -In this example: - -- The `Transfer` event is used to log transactions of tokens between addresses. -- The `from` and `to` parameters are indexed, allowing event listeners to filter and monitor transfers involving specific addresses. -- The `transfer` function is a simple representation of a token transfer action, emitting the Transfer event whenever it is called. - -#### Configuration in Subgraphs - -Topic filters are defined directly within the event handler configuration in the subgraph manifest. Here is how they are configured: - -```yaml -eventHandlers: - - event: SomeEvent(indexed uint256, indexed address, indexed uint256) - handler: handleSomeEvent - topic1: ['0xValue1', '0xValue2'] - topic2: ['0xAddress1', '0xAddress2'] - topic3: ['0xValue3'] -``` - -In this setup: - -- `topic1` corresponds to the first indexed argument of the event, `topic2` to the second, and `topic3` to the third. -- Each topic can have one or more values, and an event is only processed if it matches one of the values in each specified topic. - -#### Filter Logic - -- Within a Single Topic: The logic functions as an OR condition. The event will be processed if it matches any one of the listed values in a given topic. -- Between Different Topics: The logic functions as an AND condition. An event must satisfy all specified conditions across different topics to trigger the associated handler. 
- -#### Example 1: Tracking Direct Transfers from Address A to Address B - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleDirectedTransfer - topic1: ['0xAddressA'] # Sender Address - topic2: ['0xAddressB'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` is the receiver. -- The subgraph will only index transactions that occur directly from `0xAddressA` to `0xAddressB`. - -#### Example 2: Tracking Transactions in Either Direction Between Two or More Addresses - -```yaml -eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransferToOrFrom - topic1: ['0xAddressA', '0xAddressB', '0xAddressC'] # Sender Address - topic2: ['0xAddressB', '0xAddressC'] # Receiver Address -``` - -In this configuration: - -- `topic1` is configured to filter `Transfer` events where `0xAddressA`, `0xAddressB`, `0xAddressC` is the sender. -- `topic2` is configured to filter `Transfer` events where `0xAddressB` and `0xAddressC` is the receiver. -- The subgraph will index transactions that occur in either direction between multiple addresses allowing for comprehensive monitoring of interactions involving all addresses. - -## Declared eth_call - -> Note: This is an experimental feature that is not currently available in a stable Graph Node release yet. You can only use it in Subgraph Studio or your self-hosted node. - -Declarative `eth_calls` are a valuable subgraph feature that allows `eth_calls` to be executed ahead of time, enabling `graph-node` to execute them in parallel. - -This feature does the following: - -- Significantly improves the performance of fetching data from the Ethereum blockchain by reducing the total time for multiple calls and optimizing the subgraph's overall efficiency. 
-- Allows faster data fetching, resulting in quicker query responses and a better user experience. -- Reduces wait times for applications that need to aggregate data from multiple Ethereum calls, making the data retrieval process more efficient. - -### Key Concepts - -- Declarative `eth_calls`: Ethereum calls that are defined to be executed in parallel rather than sequentially. -- Parallel Execution: Instead of waiting for one call to finish before starting the next, multiple calls can be initiated simultaneously. -- Time Efficiency: The total time taken for all the calls changes from the sum of the individual call times (sequential) to the time taken by the longest call (parallel). - -#### Scenario without Declarative `eth_calls` - -Imagine you have a subgraph that needs to make three Ethereum calls to fetch data about a user's transactions, balance, and token holdings. - -Traditionally, these calls might be made sequentially: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Total time taken = 3 + 2 + 4 = 9 seconds - -#### Scenario with Declarative `eth_calls` - -With this feature, you can declare these calls to be executed in parallel: - -1. Call 1 (Transactions): Takes 3 seconds -2. Call 2 (Balance): Takes 2 seconds -3. Call 3 (Token Holdings): Takes 4 seconds - -Since these calls are executed in parallel, the total time taken is equal to the time taken by the longest call. - -Total time taken = max (3, 2, 4) = 4 seconds - -#### How it Works - -1. Declarative Definition: In the subgraph manifest, you declare the Ethereum calls in a way that indicates they can be executed in parallel. -2. Parallel Execution Engine: The Graph Node's execution engine recognizes these declarations and runs the calls simultaneously. -3. Result Aggregation: Once all calls are complete, the results are aggregated and used by the subgraph for further processing. 
- -#### Example Configuration in Subgraph Manifest - -Declared `eth_calls` can access the `event.address` of the underlying event as well as all the `event.params`. - -`Subgraph.yaml` using `event.address`: - -```yaml -eventHandlers: -event: Swap(indexed address,indexed address,int256,int256,uint160,uint128,int24) -handler: handleSwap -calls: - global0X128: Pool[event.address].feeGrowthGlobal0X128() - global1X128: Pool[event.address].feeGrowthGlobal1X128() -``` - -Details for the example above: - -- `global0X128` is the declared `eth_call`. -- The text (`global0X128`) is the label for this `eth_call` which is used when logging errors. -- The text (`Pool[event.address].feeGrowthGlobal0X128()`) is the actual `eth_call` that will be executed, which is in the form of `Contract[address].function(arguments)` -- The `address` and `arguments` can be replaced with variables that will be available when the handler is executed. - -`Subgraph.yaml` using `event.params` - -```yaml -calls: - - ERC20DecimalsToken0: ERC20[event.params.token0].decimals() -``` - -### Grafting على Subgraphs موجودة - -> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). - -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. - -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: - -```yaml -description: ... -graft: - base: Qm... 
# Subgraph ID of base subgraph - block: 7345624 # Block number -``` - -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. - -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. - -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: - -- يضيف أو يزيل أنواع الكيانات -- يزيل الصفات من أنواع الكيانات -- يضيف صفات nullable لأنواع الكيانات -- يحول صفات non-nullable إلى صفات nullable -- يضيف قيما إلى enums -- يضيف أو يزيل الواجهات -- يغير للكيانات التي يتم تنفيذ الواجهة لها - -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. 
diff --git a/website/pages/ar/developing/creating-a-subgraph/install-the-cli.mdx b/website/pages/ar/developing/creating-a-subgraph/install-the-cli.mdx deleted file mode 100644 index b18e9aa8f7fb..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/install-the-cli.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: قم بتثبيت Graph CLI ---- - -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key) in [Subgraph Studio](https://thegraph.com/studio/apikeys/). It is recommended that you add signal to your subgraph with at least 3,000 GRT to attract 2-3 Indexers. To learn more about signaling, check out [curating](/network/curating/). - -## نظره عامة - -The [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) is a command-line interface that facilitates developers' commands for The Graph. It processes a [subgraph manifest](/creating-a-subgraph/subgraph-manifest/) and compiles the [mappings](/creating-a-subgraph/assemblyscript-mappings/) to create the files you will need to deploy the subgraph to [Subgraph Studio](https://thegraph.com/studio/) and the network. - -## Getting Started - -### قم بتثبيت Graph CLI - -The Graph CLI is written in TypeScript, and you must have `node` and either `npm` or `yarn` installed to use it. Check for the [most recent](https://github.com/graphprotocol/graph-tooling/releases?q=%40graphprotocol%2Fgraph-cli&expanded=true) CLI version. - -On your local machine, run one of the following commands: - -#### Using [npm](https://www.npmjs.com/) - -```bash -npm install -g @graphprotocol/graph-cli@latest -``` - -#### Using [yarn](https://yarnpkg.com/) - -```bash -yarn global add @graphprotocol/graph-cli -``` - -The `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. 
If you already have a smart contract deployed to your preferred network, you can bootstrap a new subgraph from that contract to get started. - -## إنشاء الـ Subgraph - -### من عقد موجود - -The following command creates a subgraph that indexes all events of an existing contract: - -```sh -graph init \ - --product subgraph-studio - --from-contract \ - [--network ] \ - [--abi ] \ - [] -``` - -- The command tries to retrieve the contract ABI from Etherscan. - - - The Graph CLI relies on a public RPC endpoint. While occasional failures are expected, retries typically resolve this issue. If failures persist, consider using a local ABI. - -- If any of the optional arguments are missing, it guides you through an interactive form. - -- The `` is the ID of your subgraph in [Subgraph Studio](https://thegraph.com/studio/). It can be found on your subgraph details page. - -### من مثال Subgraph - -The following command initializes a new project from an example subgraph: - -```sh -graph init --from-example=example-subgraph -``` - -- The [example subgraph](https://github.com/graphprotocol/example-subgraph) is based on the Gravity contract by Dani Grant, which manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. - -- The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. - -### Add New `dataSources` to an Existing Subgraph - -`dataSources` are key components of subgraphs. They define the sources of data that the subgraph indexes and processes. A `dataSource` specifies which smart contract to listen to, which events to process, and how to handle them. - -Recent versions of the Graph CLI supports adding new `dataSources` to an existing subgraph through the `graph add` command: - -```sh -graph add
[] - -Options: - - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") -``` - -#### Specifics - -The `graph add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option) and creates a new `dataSource`, similar to how the `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. This allows you to index implementation contracts from their proxy contracts. - -- The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: - - - If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. - - - If `false`: a new `entity` & `event` handler should be created with `${dataSourceName}{EventName}`. - -- The contract `address` will be written to the `networks.json` for the relevant network. - -> Note: When using the interactive CLI, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. - -### Getting The ABIs - -يجب أن تتطابق ملف (ملفات) ABI مع العقد (العقود) الخاصة بك. هناك عدة طرق للحصول على ملفات ABI: - -- إذا كنت تقوم ببناء مشروعك الخاص ، فمن المحتمل أن تتمكن من الوصول إلى أحدث ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`npx hardhat compile`](https://hardhat.org/hardhat-runner/docs/guides/compile-contracts#compiling-your-contracts) or using `solc` to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. 
- -## SpecVersion Releases - -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 1.2.0 | Added support for [Indexed Argument Filtering](/#indexed-argument-filters--topic-filters) & declared `eth_call` | -| 1.1.0 | Supports [Timeseries & Aggregations](#timeseries-and-aggregations). Added support for type `Int8` for `id`. | -| 1.0.0 | Supports [`indexerHints`](/developing/creating-a-subgraph/#indexer-hints) feature to prune subgraphs | -| 0.0.9 | Supports `endBlock` feature | -| 0.0.8 | Added support for polling [Block Handlers](/developing/creating-a-subgraph/#polling-filter) and [Initialisation Handlers](/developing/creating-a-subgraph/#once-filter). | -| 0.0.7 | Added support for [File Data Sources](/developing/creating-a-subgraph/#file-data-sources). | -| 0.0.6 | Supports fast [Proof of Indexing](/network/indexing/#what-is-a-proof-of-indexing-poi) calculation variant. | -| 0.0.5 | Added support for event handlers having access to transaction receipts. | -| 0.0.4 | Added support for managing subgraph features. | diff --git a/website/pages/ar/developing/creating-a-subgraph/ql-schema.mdx b/website/pages/ar/developing/creating-a-subgraph/ql-schema.mdx deleted file mode 100644 index 20b4acef827a..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/ql-schema.mdx +++ /dev/null @@ -1,312 +0,0 @@ ---- -title: The Graph QL Schema ---- - -## نظره عامة - -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. - -> Note: If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api/) section. - -### Defining Entities - -Before defining entities, it is important to take a step back and think about how your data is structured and linked. - -- All queries will be made against the data model defined in the subgraph schema. 
As a result, the design of the subgraph schema should be informed by the queries that your application will need to perform. -- It may be useful to imagine entities as "objects containing data", rather than as events or functions. -- You define entity types in `schema.graphql`, and Graph Node will generate top-level fields for querying single instances and collections of that entity type. -- Each type that should be an entity is required to be annotated with an `@entity` directive. -- By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. - - Mutability comes at a price, so for entity types that will never be modified, such as those containing data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. - - If changes happen in the same block in which the entity was created, then mappings can make changes to immutable entities. Immutable entities are much faster to write and to query so they should be used whenever possible. - -#### مثال جيد - -The following `Gravatar` entity is structured around a Gravatar object and is a good example of how an entity could be defined. - -```graphql -type Gravatar @entity(immutable: true) { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String - accepted: Boolean -} -``` - -#### مثال سيئ - -The following example `GravatarAccepted` and `GravatarDeclined` entities are based around events. It is not recommended to map events or function calls to entities 1:1. - -```graphql -type GravatarAccepted @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} - -type GravatarDeclined @entity { - id: Bytes! - owner: Bytes - displayName: String - imageUrl: String -} -``` - -#### الحقول الاختيارية والمطلوبة - -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. 
If the field is a scalar field, you get an error when you try to store the entity. If the field references another entity then you get this error: - -``` -Null value resolved for non-null field 'name' -``` - -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query than those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. - -For some entity types the `id` for `Bytes!` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id) ` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. - -### أنواع المقاييس المضمنة - -#### المقاييس المدعومة من GraphQL - -The following scalars are supported in the GraphQL API: - -| النوع | الوصف | -| --- | --- | -| `Bytes` | مصفوفة Byte ، ممثلة كسلسلة سداسية عشرية. يشيع استخدامها في Ethereum hashes وعناوينه. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to be a signed 32-bit integer. | -| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | -| `BigInt` | Large integers. 
Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | -| `Timestamp` | It is an `i64` value in microseconds. Commonly used for `timestamp` fields for timeseries and aggregations. | - -### Enums - -You can also create enums within a schema. Enums have the following syntax: - -```graphql -enum TokenStatus { - OriginalOwner - SecondOwner - ThirdOwner -} -``` - -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: - -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). - -### علاقات الكيانات - -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. - -Relationships are defined on entities just like any other field except that the type specified is that of another entity. - -#### العلاقات واحد-لواحد - -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: - -```graphql -type Transaction @entity(immutable: true) { - id: Bytes! - transactionReceipt: TransactionReceipt -} - -type TransactionReceipt @entity(immutable: true) { - id: Bytes! 
- transaction: Transaction -} -``` - -#### علاقات واحد-لمتعدد - -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -### البحث العكسي - -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. - -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. - -#### مثال - -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: - -```graphql -type Token @entity(immutable: true) { - id: Bytes! - tokenBalances: [TokenBalance!]! @derivedFrom(field: "token") -} - -type TokenBalance @entity { - id: Bytes! - amount: Int! - token: Token! -} -``` - -#### علاقات متعدد_لمتعدد - -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. 
- -#### مثال - -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [User!]! -} - -type User @entity { - id: Bytes! - name: String! - organizations: [Organization!]! @derivedFrom(field: "members") -} -``` - -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like - -```graphql -type Organization @entity { - id: Bytes! - name: String! - members: [UserOrganization!]! @derivedFrom(field: "organization") -} - -type User @entity { - id: Bytes! - name: String! - organizations: [UserOrganization!] @derivedFrom(field: "user") -} - -type UserOrganization @entity { - id: Bytes! # Set to `user.id.concat(organization.id)` - user: User! - organization: Organization! -} -``` - -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: - -```graphql -query usersWithOrganizations { - users { - organizations { - # this is a UserOrganization entity - organization { - name - } - } - } -} -``` - -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. - -### إضافة تعليقات إلى المخطط (schema) - -As per GraphQL spec, comments can be added above schema entity attributes using the hash symbol `#`. This is illustrated in the example below: - -```graphql -type MyFirstEntity @entity { - # unique identifier and primary key of the entity - id: Bytes! - address: Bytes! 
-} -``` - -## تعريف حقول البحث عن النص الكامل - -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. - -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. - -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. - -```graphql -type _Schema_ - @fulltext( - name: "bandSearch" - language: en - algorithm: rank - include: [{ entity: "Band", fields: [{ name: "name" }, { name: "description" }, { name: "bio" }] }] - ) - -type Band @entity { - id: Bytes! - name: String! - description: String! - bio: String - wallet: Address - labels: [Label!]! - discography: [Album!]! - members: [Musician!]! -} -``` - -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. - -```graphql -query { - bandSearch(text: "breaks & electro & detroit") { - id - name - description - wallet - } -} -``` - -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. - -## اللغات المدعومة - -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. 
For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". - -Supported language dictionaries: - -| Code | القاموس | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### خوارزميات التصنيف - -Supported algorithms for ordering results: - -| Algorithm | Description | -| ------------- | --------------------------------------------------------------- | -| rank | استخدم جودة مطابقة استعلام النص-الكامل (0-1) لترتيب النتائج. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | diff --git a/website/pages/ar/developing/creating-a-subgraph/starting-your-subgraph.mdx b/website/pages/ar/developing/creating-a-subgraph/starting-your-subgraph.mdx deleted file mode 100644 index f48efba92d85..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/starting-your-subgraph.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Starting Your Subgraph ---- - -## نظره عامة - -The Graph is home to thousands of subgraphs already available for query, so check [The Graph Explorer](https://thegraph.com/explorer) and find one that already matches your needs. - -When you create a [subgraph](/subgraphs/), you create a custom open API that extracts data from a blockchain, processes it, stores it, and makes it easy to query via GraphQL. - -Subgraph development ranges from simple scaffold subgraphs to advanced, specifically tailored subgraphs. - -### Start Building - -Start the process and build a subgraph that matches your needs: - -1. [Install the CLI](/developing/creating-a-subgraph/install-the-cli/) - Set up your infrastructure -2. 
[Subgraph Manifest](/developing/creating-a-subgraph/subgraph-manifest/) - Understand a subgraph's key component -3. [The Graph Ql Schema](/developing/creating-a-subgraph/ql-schema/) - Write your schema -4. [Writing AssemblyScript Mappings](/developing/creating-a-subgraph/assemblyscript-mappings/) - Write your mappings -5. [Advanced Features](/developing/creating-a-subgraph/advanced/) - Customize your subgraph with advanced features diff --git a/website/pages/ar/developing/creating-a-subgraph/subgraph-manifest.mdx b/website/pages/ar/developing/creating-a-subgraph/subgraph-manifest.mdx deleted file mode 100644 index 8c36c56b624a..000000000000 --- a/website/pages/ar/developing/creating-a-subgraph/subgraph-manifest.mdx +++ /dev/null @@ -1,534 +0,0 @@ ---- -title: Subgraph Manifest ---- - -## نظره عامة - -The subgraph manifest, `subgraph.yaml`, defines the smart contracts & network your subgraph will index, the events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. - -The **subgraph definition** consists of the following files: - -- `subgraph.yaml`: Contains the subgraph manifest - -- `schema.graphql`: A GraphQL schema defining the data stored for your subgraph and how to query it via GraphQL - -- `mapping.ts`: [AssemblyScript Mappings](https://github.com/AssemblyScript/assemblyscript) code that translates event data into entities defined in your schema (e.g. `mapping.ts` in this guide) - -### Subgraph Capabilities - -A single subgraph can: - -- Index data from multiple smart contracts (but not multiple networks). - -- Index data from IPFS files using File Data Sources. - -- Add an entry for each contract that requires indexing to the `dataSources` array. - -The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
- -For the example subgraph listed above, `subgraph.yaml` is: - -```yaml -specVersion: 0.0.4 -description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: ./schema.graphql -indexerHints: - prune: auto -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' - abi: Gravity - startBlock: 6175244 - endBlock: 7175245 - context: - foo: - type: Bool - data: true - bar: - type: String - data: 'bar' - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - - event: UpdatedGravatar(uint256,address,string,string) - handler: handleUpdatedGravatar - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCall - filter: - kind: call - file: ./src/mapping.ts -``` - -## Subgraph Entries - -> Important Note: Be sure you populate your subgraph manifest with all handlers and [entities](/developing/creating-a-subgraph/ql-schema/). - -الإدخالات الهامة لتحديث manifest هي: - -- `specVersion`: a semver version that identifies the supported manifest structure and functionality for the subgraph. The latest version is `1.2.0`. See [specVersion releases](#specversion-releases) section to see more details on features & releases. - -- `description`: a human-readable description of what the subgraph is. This description is displayed in Graph Explorer when the subgraph is deployed to Subgraph Studio. - -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed in Graph Explorer. - -- `features`: a list of all used [feature](#experimental-features) names. 
- -- `indexerHints.prune`: Defines the retention of historical block data for a subgraph. See [prune](#prune) in [indexerHints](#indexer-hints) section. - -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. - -- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. - -- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. - -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. - -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. - -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. 
- -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. - -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. - -## Event Handlers - -Event handlers in a subgraph react to specific events emitted by smart contracts on the blockchain and trigger handlers defined in the subgraph's manifest. This enables subgraphs to process and store event data according to defined logic. - -### Defining an Event Handler - -An event handler is declared within a data source in the subgraph's YAML configuration. It specifies which events to listen for and the corresponding function to execute when those events are detected. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - eventHandlers: - - event: Approval(address,address,uint256) - handler: handleApproval - - event: Transfer(address,address,uint256) - handler: handleTransfer - topic1: ['0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045', '0xc8dA6BF26964aF9D7eEd9e03E53415D37aA96325'] # Optional topic filter which filters only events with the specified topic. -``` - -## معالجات الاستدعاء(Call Handlers) - -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. 
In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. - -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. - -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network. - -### تعريف معالج الاستدعاء - -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: mainnet - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - callHandlers: - - function: createGravatar(string,string) - handler: handleCreateGravatar -``` - -The `function` is the normalized function signature to filter calls by. 
The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. - -### دالة الـ Mapping - -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: - -```typescript -import { CreateGravatarCall } from '../generated/Gravity/Gravity' -import { Transaction } from '../generated/schema' - -export function handleCreateGravatar(call: CreateGravatarCall): void { - let id = call.transaction.hash - let transaction = new Transaction(id) - transaction.displayName = call.inputs._displayName - transaction.imageUrl = call.inputs._imageUrl - transaction.save() -} -``` - -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. - -## معالجات الكتلة - -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. - -### الفلاتر المدعومة - -#### Call Filter - -```yaml -filter: - kind: call -``` - -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ - -> **Note:** The `call` filter currently depends on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more block handlers with a `call` filter, it will not start syncing. 
- -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. - -```yaml -dataSources: - - kind: ethereum/contract - name: Gravity - network: dev - source: - address: '0x731a10897d267e19b34503ad902d0a29173ba4b1' - abi: Gravity - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - entities: - - Gravatar - - Transaction - abis: - - name: Gravity - file: ./abis/Gravity.json - blockHandlers: - - handler: handleBlock - - handler: handleBlockWithCallToContract - filter: - kind: call -``` - -#### Polling Filter - -> **Requires `specVersion` >= 0.0.8** -> -> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleBlock - filter: - kind: polling - every: 10 -``` - -The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. - -#### Once Filter - -> **Requires `specVersion` >= 0.0.8** -> -> **Note:** Once filters are only available on dataSources of `kind: ethereum`. - -```yaml -blockHandlers: - - handler: handleOnce - filter: - kind: once -``` - -The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. - -```ts -export function handleOnce(block: ethereum.Block): void { - let data = new InitialData(Bytes.fromUTF8('initial')) - data.data = 'Setup data here' - data.save() -} -``` - -### دالة الـ Mapping - -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -export function handleBlock(block: ethereum.Block): void { - let id = block.hash - let entity = new Block(id) - entity.save() -} -``` - -## أحداث الـ مجهول - -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: - -```yaml -eventHandlers: - - event: LogNote(bytes4,address,bytes32,bytes32,uint256,bytes) - topic0: '0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31' - handler: handleGive -``` - -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. - -## Transaction Receipts in Event Handlers - -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. - -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. - -```yaml -eventHandlers: - - event: NewGravatar(uint256,address,string,string) - handler: handleNewGravatar - receipt: true -``` - -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. - -## Order of Triggering Handlers - -يتم ترتيب المشغلات (triggers) لمصدر البيانات داخل الكتلة باستخدام العملية التالية: - -1. يتم ترتيب triggers الأحداث والاستدعاءات أولا من خلال فهرس الإجراء داخل الكتلة. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. يتم تشغيل مشغلات الكتلة بعد مشغلات الحدث والاستدعاء، بالترتيب المحدد في الـ manifest. - -قواعد الترتيب هذه عرضة للتغيير. 
- -> **Note:** When new [dynamic data source](#data-source-templates-for-dynamically-created-contracts) are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -## قوالب مصدر البيانات - -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. - -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. - -### مصدر البيانات للعقد الرئيسي - -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: Factory - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - Directory - abis: - - name: Factory - file: ./abis/factory.json - eventHandlers: - - event: NewExchange(address,address) - handler: handleNewExchange -``` - -### قوالب مصدر البيانات للعقود التي تم إنشاؤها ديناميكيا - -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. 
Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. - -```yaml -dataSources: - - kind: ethereum/contract - name: Factory - # ... other source fields for the main contract ... -templates: - - name: Exchange - kind: ethereum/contract - network: mainnet - source: - abi: Exchange - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/exchange.ts - entities: - - Exchange - abis: - - name: Exchange - file: ./abis/exchange.json - eventHandlers: - - event: TokenPurchase(address,uint256,uint256) - handler: handleTokenPurchase - - event: EthPurchase(address,uint256,uint256) - handler: handleEthPurchase - - event: AddLiquidity(address,uint256,uint256) - handler: handleAddLiquidity - - event: RemoveLiquidity(address,uint256,uint256) - handler: handleRemoveLiquidity -``` - -### إنشاء قالب مصدر البيانات - -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract - Exchange.create(event.params.exchange) -} -``` - -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. -> -> إذا كانت الكتل السابقة تحتوي على بيانات ذات صلة بمصدر البيانات الجديد ، فمن الأفضل فهرسة تلك البيانات من خلال قراءة الحالة الحالية للعقد وإنشاء كيانات تمثل تلك الحالة في وقت إنشاء مصدر البيانات الجديد. 
- -### سياق مصدر البيانات - -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: - -```typescript -import { Exchange } from '../generated/templates' - -export function handleNewExchange(event: NewExchange): void { - let context = new DataSourceContext() - context.setString('tradingPair', event.params.tradingPair) - Exchange.createWithContext(event.params.exchange, context) -} -``` - -Inside a mapping of the `Exchange` template, the context can then be accessed: - -```typescript -import { dataSource } from '@graphprotocol/graph-ts' - -let context = dataSource.context() -let tradingPair = context.getString('tradingPair') -``` - -There are setters and getters like `setString` and `getString` for all value types. - -## كتل البدء - -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. - -```yaml -dataSources: - - kind: ethereum/contract - name: ExampleSource - network: mainnet - source: - address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95' - abi: ExampleContract - startBlock: 6627917 - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/mappings/factory.ts - entities: - - User - abis: - - name: ExampleContract - file: ./abis/ExampleContract.json - eventHandlers: - - event: NewEvent(address,address) - handler: handleNewEvent -``` - -> **Note:** The contract creation block can be quickly looked up on Etherscan: -> -> 1. ابحث عن العقد بإدخال عنوانه في شريط البحث. -> 2. 
Click on the creation transaction hash in the `Contract Creator` section. -> 3. قم بتحميل صفحة تفاصيل الإجراء(transaction) حيث ستجد كتلة البدء لذلك العقد. - -## Indexer Hints - -The `indexerHints` setting in a subgraph's manifest provides directives for indexers on processing and managing a subgraph. It influences operational decisions across data handling, indexing strategies, and optimizations. Presently, it features the `prune` option for managing historical data retention or pruning. - -> This feature is available from `specVersion: 1.0.0` - -### Prune - -`indexerHints.prune`: Defines the retention of historical block data for a subgraph. Options include: - -1. `"never"`: No pruning of historical data; retains the entire history. -2. `"auto"`: Retains the minimum necessary history as set by the indexer, optimizing query performance. -3. A specific number: Sets a custom limit on the number of historical blocks to retain. - -``` - indexerHints: - prune: auto -``` - -> The term "history" in this context of subgraphs is about storing data that reflects the old states of mutable entities. - -History as of a given block is required for: - -- [Time travel queries](/querying/graphql-api/#time-travel-queries), which enable querying the past states of these entities at specific blocks throughout the subgraph's history -- Using the subgraph as a [graft base](/developing/creating-a-subgraph/#grafting-onto-existing-subgraphs) in another subgraph, at that block -- Rewinding the subgraph back to that block - -If historical data as of the block has been pruned, the above capabilities will not be available. - -> Using `"auto"` is generally recommended as it maximizes query performance and is sufficient for most users who do not require access to extensive historical data. 
- -For subgraphs leveraging [time travel queries](/querying/graphql-api/#time-travel-queries), it's advisable to either set a specific number of blocks for historical data retention or use `prune: never` to keep all historical entity states. Below are examples of how to configure both options in your subgraph's settings: - -To retain a specific amount of historical data: - -``` - indexerHints: - prune: 1000 # Replace 1000 with the desired number of blocks to retain -``` - -To preserve the complete history of entity states: - -``` -indexerHints: - prune: never -``` diff --git a/website/pages/ar/developing/developer-faqs.mdx b/website/pages/ar/developing/developer-faqs.mdx deleted file mode 100644 index 01aa712bb83c..000000000000 --- a/website/pages/ar/developing/developer-faqs.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: الأسئلة الشائعة للمطورين ---- - -This page summarizes some of the most common questions for developers building on The Graph. - -## Subgraph Related - -### 1. What is a subgraph? - -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using The Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available for subgraph consumers to query. - -### 2. What is the first step to create a subgraph? - -To successfully create a subgraph, you will need to install The Graph CLI. Review the [Quick Start](/quick-start/) to get started. For detailed information, see [Creating a Subgraph](/developing/creating-a-subgraph/). - -### 3. Can I still create a subgraph if my smart contracts don't have events? - -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are the fastest way to retrieve useful data. 
- -If the contracts you work with do not contain events, your subgraph can use call and block handlers to trigger indexing. However, this is not recommended, as performance will be significantly slower. - -### 4. Can I change the GitHub account associated with my subgraph? - -No. Once a subgraph is created, the associated GitHub account cannot be changed. Please make sure to carefully consider this before creating your subgraph. - -### 5. How do I update a subgraph on mainnet? - -You can deploy a new version of your subgraph to Subgraph Studio using the CLI. This action maintains your subgraph private, but once you’re happy with it, you can publish to Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. - -### 6. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? - -يجب عليك إعادة نشر ال الفرعيةرسم بياني ، ولكن إذا لم يتغير الفرعيةرسم بياني (ID (IPFS hash ، فلن يضطر إلى المزامنة من البداية. - -### 7. How do I call a contract function or access a public state variable from my subgraph mappings? - -Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/developing/graph-ts/api/#access-to-smart-contract-state). - -### 8. Can I import `ethers.js` or other JS libraries into my subgraph mappings? - -Not currently, as mappings are written in AssemblyScript. - -One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. - -### 9. When listening to multiple contracts, is it possible to select the contract order to listen to events? - -ضمن ال Subgraph ، تتم معالجة الأحداث دائمًا بالترتيب الذي تظهر به في الكتل ، بغض النظر عما إذا كان ذلك عبر عقود متعددة أم لا. - -### 10. How are templates different from data sources? - -Templates allow you to create data sources quickly, while your subgraph is indexing. Your contract might spawn new contracts as people interact with it. 
Since you know the shape of those contracts (ABI, events, etc.) upfront, you can define how you want to index them in a template. When they are spawned, your subgraph will create a dynamic data source by supplying the contract address. - -Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph#data-source-templates). - -### 11. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another dataSource in `subgraph.yaml` after running `graph init`? - -Yes. On `graph init` command itself you can add multiple dataSources by entering contracts one after the other. - -You can also use `graph add` command to add a new dataSource. - -### 12. In what order are the event, block, and call handlers triggered for a data source? - -Event and call handlers are first ordered by transaction index within the block. Event and call handlers within the same transaction are ordered using a convention: event handlers first then call handlers, each type respecting the order they are defined in the manifest. Block handlers are run after event and call handlers, in the order they are defined in the manifest. Also these ordering rules are subject to change. - -When new dynamic data source are created, the handlers defined for dynamic data sources will only start processing after all existing data source handlers are processed, and will repeat in the same sequence whenever triggered. - -### 13. How do I make sure I'm using the latest version of graph-node for my local deployments? - -يمكنك تشغيل الأمر التالي: - -```sh -docker pull graphprotocol/graph-node:latest -``` - -> Note: docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so make sure you're up to date with the latest version of graph-node. - -### 14. What is the recommended way to build "autogenerated" ids for an entity when handling events? 
- -إذا تم إنشاء كيان واحد فقط أثناء الحدث ولم يكن هناك أي شيء متاح بشكل أفضل ، فسيكون hash الإجراء + فهرس السجل فريدا. يمكنك إبهامها عن طريق تحويلها إلى Bytes ثم تمريرها عبر `crypto.keccak256` ولكن هذا لن يجعلها فريدة من نوعها. - -### 15. Can I delete my subgraph? - -Yes, you can [delete](/managing/delete-a-subgraph/) and [transfer](/managing/transfer-a-subgraph/) your subgraph. - -## Network Related - -### 16. What networks are supported by The Graph? - -You can find the list of the supported networks [here](/developing/supported-networks). - -### 17. Is it possible to differentiate between networks (mainnet, Sepolia, local) within event handlers? - -نعم. يمكنك القيام بذلك عن طريق استيراد `graph-ts` كما في المثال أدناه: - -```javascript -'import { dataSource } from '@graphprotocol/graph-ts - -()dataSource.network -()dataSource.address -``` - -### 18. Do you support block and call handlers on Sepolia? - -Yes. Sepolia supports block handlers, call handlers and event handlers. It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. - -## Indexing & Querying Related - -### 19. Is it possible to specify what block to start indexing on? - -Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the dataSource starts indexing from. In most cases, we suggest using the block where the contract was created: [Start blocks](/developing/creating-a-subgraph#start-blocks) - -### 20. What are some tips to increase the performance of indexing? My subgraph is taking a very long time to sync - -Yes, you should take a look at the optional start block feature to start indexing from the block where the contract was deployed: [Start blocks](/developing/creating-a-subgraph#start-blocks) - -### 21. Is there a way to query the subgraph directly to determine the latest block number it has indexed? - -نعم! 
جرب الأمر التالي ، مع استبدال "Organization / subgraphName" بالمؤسسة واسم الـ subgraph الخاص بك: - -```sh -curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql -``` - -### 22. Is there a limit to how many objects The Graph can return per query? - -By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: - -```graphql -someCollection(first: 1000, skip: ) { ... } -``` - -### 23. If my dapp frontend uses The Graph for querying, do I need to write my API key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? - -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. - -## Miscellaneous - -### 24. Is it possible to use Apollo Federation on top of graph-node? - -Federation is not supported yet. At the moment, you can use schema stitching, either on the client or via a proxy service. - -### 25. I want to contribute or add a GitHub issue. Where can I find the open source repositories? 
- -- [graph-node](https://github.com/graphprotocol/graph-node) -- [graph-tooling](https://github.com/graphprotocol/graph-tooling) -- [graph-docs](https://github.com/graphprotocol/docs) -- [graph-client](https://github.com/graphprotocol/graph-client) diff --git a/website/pages/ar/developing/graph-ts/_meta.js b/website/pages/ar/developing/graph-ts/_meta.js deleted file mode 100644 index 466762da9ce8..000000000000 --- a/website/pages/ar/developing/graph-ts/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../../en/developing/graph-ts/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/developing/graph-ts/api.mdx b/website/pages/ar/developing/graph-ts/api.mdx deleted file mode 100644 index 2de72189db87..000000000000 --- a/website/pages/ar/developing/graph-ts/api.mdx +++ /dev/null @@ -1,890 +0,0 @@ ---- -title: AssemblyScript API ---- - -> Note: If you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, then you're using an older version of AssemblyScript. It is recommended to review the [`Migration Guide`](/release-notes/assemblyscript-migration-guide). - -Learn what built-in APIs can be used when writing subgraph mappings. There are two kinds of APIs available out of the box: - -- The [Graph TypeScript library](https://github.com/graphprotocol/graph-tooling/tree/main/packages/ts) (`graph-ts`) -- Code generated from subgraph files by `graph codegen` - -You can also add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). - -Since language mappings are written in AssemblyScript, it is useful to review the language and standard library features from the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki). - -## مرجع API - -The `@graphprotocol/graph-ts` library provides the following APIs: - -- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. 
-- A `store` API to load and save entities from and to the Graph Node store. -- A `log` API to log messages to the Graph Node output and Graph Explorer. -- An `ipfs` API to load files from IPFS. -- A `json` API to parse JSON data. -- A `crypto` API to use cryptographic functions. -- الأوامر الأساسية منخفضة المستوى للترجمة بين أنظمة الأنواع المختلفة مثل Ethereum و JSON و GraphQL و AssemblyScript. - -### إصدارات - -The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. - -| الاصدار | ملاحظات الإصدار | -| :-: | --- | -| 0.0.9 | Adds new host functions [`eth_get_balance`](#balance-of-an-address) & [`hasCode`](#check-if-an-address-is-a-contract-or-eoa) | -| 0.0.8 | Adds validation for existence of fields in the schema when saving an entity. | -| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | -| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | -| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | -| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | -| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | -| 0.0.2 | Added `input` field to the Ethereum Transaction object | - -### الأنواع المضمنة (Built-in) - -Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://www.assemblyscript.org/types.html). - -The following additional types are provided by `@graphprotocol/graph-ts`. - -#### ByteArray - -```typescript -'import { ByteArray } from '@graphprotocol/graph-ts -``` - -`ByteArray` represents an array of `u8`. - -_Construction_ - -- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. -- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. - -_Type conversions_ - -- `toHexString(): string` - Converts to a hex string prefixed with `0x`. -- `toString(): string` - Interprets the bytes as a UTF-8 string. -- `toBase58(): string` - Encodes the bytes into a base58 string. -- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. -- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. - -_Operators_ - -- `equals(y: ByteArray): bool` – can be written as `x == y`. -- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` -- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` - -#### BigDecimal - -```typescript -'import { BigDecimal } from '@graphprotocol/graph-ts -``` - -`BigDecimal` is used to represent arbitrary precision decimals. - -> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar/bigdecimal.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. 
This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. - -_Construction_ - -- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. -- `static fromString(s: string): BigDecimal` – parses from a decimal string. - -_Type conversions_ - -- `toString(): string` – prints to a decimal string. - -_Math_ - -- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. -- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. -- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. -- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. -- `equals(y: BigDecimal): bool` – can be written as `x == y`. -- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. -- `lt(y: BigDecimal): bool` – can be written as `x < y`. -- `le(y: BigDecimal): bool` – can be written as `x <= y`. -- `gt(y: BigDecimal): bool` – can be written as `x > y`. -- `ge(y: BigDecimal): bool` – can be written as `x >= y`. -- `neg(): BigDecimal` - can be written as `-x`. - -#### BigInt - -```typescript -'import { BigInt } from '@graphprotocol/graph-ts -``` - -`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. - -The `BigInt` class has the following API: - -_Construction_ - -- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. - -- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. - -- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. - -- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. 
If your input is big-endian, call `.reverse()` first. - - _Type conversions_ - -- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. - -- `x.toString(): string` – turns `BigInt` into a decimal number string. - -- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. - -- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. - -_Math_ - -- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. -- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. -- `x.times(y: BigInt): BigInt` – can be written as `x * y`. -- `x.div(y: BigInt): BigInt` – can be written as `x / y`. -- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. -- `x.equals(y: BigInt): bool` – can be written as `x == y`. -- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. -- `x.lt(y: BigInt): bool` – can be written as `x < y`. -- `x.le(y: BigInt): bool` – can be written as `x <= y`. -- `x.gt(y: BigInt): bool` – can be written as `x > y`. -- `x.ge(y: BigInt): bool` – can be written as `x >= y`. -- `x.neg(): BigInt` – can be written as `-x`. -- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. -- `x.isZero(): bool` – Convenience for checking if the number is zero. -- `x.isI32(): bool` – Check if the number fits in an `i32`. -- `x.abs(): BigInt` – Absolute value. -- `x.pow(exp: u8): BigInt` – Exponentiation. -- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. -- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. -- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. -- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. - -#### TypedMap - -```typescript -'import { TypedMap } from '@graphprotocol/graph-ts -``` - -`TypedMap` can be used to store key-value pairs. 
See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). - -The `TypedMap` class has the following API: - -- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` -- `map.set(key: K, value: V): void` – sets the value of `key` to `value` -- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map -- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map -- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not - -#### Bytes - -```typescript -'import { Bytes } from '@graphprotocol/graph-ts -``` - -`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. - -The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: - -_Construction_ - -- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. 
The string `hex` can optionally start with `0x` -- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes - -_Type conversions_ - -- `b.toHex()` – returns a hexadecimal string representing the bytes in the array -- `b.toString()` – converts the bytes in the array to a string of unicode characters -- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) - -_Operators_ - -- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` -- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` - -#### العنوان - -```typescript -'import { Address } from '@graphprotocol/graph-ts -``` - -`Address` extends `Bytes` to represent Ethereum `address` values. - -It adds the following method on top of the `Bytes` API: - -- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string -- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error - -### مخزن API - -```typescript -'import { store } from '@graphprotocol/graph-ts -``` - -The `store` API allows to load, save and remove entities from and to the Graph Node store. - -Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. - -#### إنشاء الكيانات - -The following is a common pattern for creating entities from Ethereum events. 
- -```typescript -// Import the Transfer event class generated from the ERC20 ABI -import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' - -// Import the Transfer entity type generated from the GraphQL schema -import { Transfer } from '../generated/schema' - -// Transfer event handler -export function handleTransfer(event: TransferEvent): void { - // Create a Transfer entity, using the transaction hash as the entity ID - let id = event.transaction.hash - let transfer = new Transfer(id) - - // Set properties on the entity, using the event parameters - transfer.from = event.params.from - transfer.to = event.params.to - transfer.amount = event.params.amount - - // Save the entity to the store - transfer.save() -} -``` - -When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. - -Each entity must have a unique ID to avoid collisions with other entities. It is fairly common for event parameters to include a unique identifier that can be used. - -> Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. - -#### تحميل الكيانات من المخزن - -If an entity already exists, it can be loaded from the store with the following: - -```typescript -let id = event.transaction.hash // or however the ID is constructed -let transfer = Transfer.load(id) -if (transfer == null) { - transfer = new Transfer(id) -} - -// Use the Transfer entity as before -``` - -As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may be necessary to check for the `null` case before using the value. 
- -> Note: Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. - -#### Looking up entities created withing a block - -As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. - -The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a transaction from some on-chain event, and a later handler wants to access this transaction if it exists. - -- In the case where the transaction does not exist, the subgraph will have to go to the database simply to find out that the entity does not exist. If the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. -- For some subgraphs, these missed lookups can contribute significantly to the indexing time. - -```typescript -let id = event.transaction.hash // or however the ID is constructed -let transfer = Transfer.loadInBlock(id) -if (transfer == null) { - transfer = new Transfer(id) -} - -// Use the Transfer entity as before -``` - -> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. - -#### Looking up derived entities - -As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. - -This enables loading derived entity fields from within an event handler. For example, given the following schema: - -```graphql -type Token @entity { - id: ID! - holder: Holder! - color: String -} - -type Holder @entity { - id: ID! - tokens: [Token!]! 
@derivedFrom(field: "holder") -} -``` - -The following code will load the `Token` entity that the `Holder` entity was derived from: - -```typescript -let holder = Holder.load('test-id') -// Load the Token entities associated with a given holder -let tokens = holder.tokens.load() -``` - -#### تحديث الكيانات الموجودة - -There are two ways to update an existing entity: - -1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. -2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. - -Changing properties is straight forward in most cases, thanks to the generated property setters: - -```typescript -let transfer = new Transfer(id) -transfer.from = ... -transfer.to = ... -transfer.amount = ... -``` - -It is also possible to unset properties with one of the following two instructions: - -```typescript -transfer.from.unset() -transfer.from = null -``` - -This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. - -Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. - -```typescript -// This won't work -entity.numbers.push(BigInt.fromI32(1)) -entity.save() - -// This will work -let numbers = entity.numbers -numbers.push(BigInt.fromI32(1)) -entity.numbers = numbers -entity.save() -``` - -#### إزالة الكيانات من المخزن - -There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: - -```typescript -import { store } from '@graphprotocol/graph-ts' -... 
-let id = event.transaction.hash -store.remove('Transfer', id) -``` - -### API إيثيريوم - -The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. - -#### دعم أنواع الإيثيريوم - -As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. - -With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. - -The following example illustrates this. Given a subgraph schema like - -```graphql -type Transfer @entity { - id: Bytes! - from: Bytes! - to: Bytes! - amount: BigInt! -} -``` - -and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: - -```typescript -let id = event.transaction.hash -let transfer = new Transfer(id) -transfer.from = event.params.from -transfer.to = event.params.to -transfer.amount = event.params.amount -transfer.save() -``` - -#### الأحداث وبيانات الكتلة/ الإجراء - -Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): - -```typescript -class Event { - address: Address - logIndex: BigInt - transactionLogIndex: BigInt - logType: string | null - block: Block - transaction: Transaction - parameters: Array - receipt: TransactionReceipt | null -} - -class Block { - hash: Bytes - parentHash: Bytes - unclesHash: Bytes - author: Address - stateRoot: Bytes - transactionsRoot: Bytes - receiptsRoot: Bytes - number: BigInt - gasUsed: BigInt - gasLimit: BigInt - timestamp: BigInt - difficulty: BigInt - totalDifficulty: BigInt - size: BigInt | null - baseFeePerGas: BigInt | null -} - -class Transaction { - hash: Bytes - index: BigInt - from: Address - to: Address | null - value: BigInt - gasLimit: BigInt - gasPrice: BigInt - input: Bytes - nonce: BigInt -} - -class TransactionReceipt { - transactionHash: Bytes - transactionIndex: BigInt - blockHash: Bytes - blockNumber: BigInt - cumulativeGasUsed: BigInt - gasUsed: BigInt - contractAddress: Address - logs: Array - status: BigInt - root: Bytes - logsBloom: Bytes -} - -class Log { - address: Address - topics: Array - data: Bytes - blockHash: Bytes - blockNumber: Bytes - transactionHash: Bytes - transactionIndex: BigInt - logIndex: BigInt - transactionLogIndex: BigInt - logType: string - removed: bool | null -} -``` - -#### الوصول إلى حالة العقد الذكي Smart Contract - -The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. - -A common pattern is to access the contract from which an event originates. 
This is achieved with the following code: - -```typescript -// Import the generated contract class and generated Transfer event class -import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' -// Import the generated entity class -import { Transfer } from '../generated/schema' - -export function handleTransfer(event: TransferEvent) { - // Bind the contract to the address that emitted the event - let contract = ERC20Contract.bind(event.address) - - // Access state variables and functions by calling them - let erc20Symbol = contract.symbol() -} -``` - -`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type - -As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. - -Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. - -#### معالجة الاستدعاءات المعادة - -If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. - -- For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: - -```typescript -let gravity = Gravity.bind(event.address) -let callResult = gravity.try_gravatarToOwner(gravatar) -if (callResult.reverted) { - log.info('getGravatar reverted', []) -} else { - let owner = callResult.value -} -``` - -> Note: A Graph node connected to a Geth or Infura client may not detect all reverts. If you rely on this, we recommend using a Graph Node connected to a Parity client. - -#### تشفير/فك تشفير ABI - -Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. 
- -```typescript -import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' - -let tupleArray: Array = [ - ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), - ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), -] - -let tuple = tupleArray as ethereum.Tuple - -let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! - -let decoded = ethereum.decode('(address,uint256)', encoded) -``` - -For more information: - -- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) -- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) -- More [complex example](https://github.com/graphprotocol/graph-node/blob/08da7cb46ddc8c09f448c5ea4b210c9021ea05ad/tests/integration-tests/host-exports/src/mapping.ts#L86). - -#### Balance of an Address - -The native token balance of an address can be retrieved using the `ethereum` module. This feature is available from `apiVersion: 0.0.9` which is defined `subgraph.yaml`. The `getBalance()` retrieves the balance of the specified address as of the end of the block in which the event is triggered. - -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -let address = Address.fromString('0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045') -let balance = ethereum.getBalance(address) // returns balance in BigInt -``` - -#### Check if an Address is a Contract or EOA - -To check whether an address is a smart contract address or an externally owned address (EOA), use the `hasCode()` function from the `ethereum` module which will return `boolean`. This feature is available from `apiVersion: 0.0.9` which is defined `subgraph.yaml`. 
- -```typescript -import { ethereum } from '@graphprotocol/graph-ts' - -let contractAddr = Address.fromString('0x2E645469f354BB4F5c8a05B3b30A929361cf77eC') -let isContract = ethereum.hasCode(contractAddr).inner // returns true - -let eoa = Address.fromString('0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045') -let isContract = ethereum.hasCode(eoa).inner // returns false -``` - -### Logging API - -```typescript -import { log } from '@graphprotocol/graph-ts -``` - -The `log` API allows subgraphs to log information to the Graph Node standard output as well as Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. - -The `log` API includes the following functions: - -- `log.debug(fmt: string, args: Array): void` - logs a debug message. -- `log.info(fmt: string, args: Array): void` - logs an informational message. -- `log.warning(fmt: string, args: Array): void` - logs a warning. -- `log.error(fmt: string, args: Array): void` - logs an error message. -- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. - -The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. 
- -```typescript -log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) -``` - -#### تسجيل قيمة واحدة أو أكثر - -##### تسجيل قيمة واحدة - -In the example below, the string value "A" is passed into an array to become`['A']` before being logged: - -```typescript -let myValue = 'A' - -export function handleSomeEvent(event: SomeEvent): void { - // Displays : "My value is: A" - log.info('My value is: {}', [myValue]) -} -``` - -##### تسجيل إدخال واحد من مصفوفة موجودة - -In the example below, only the first value of the argument array is logged, despite the array containing three values. - -```typescript -let myArray = ['A', 'B', 'C'] - -export function handleSomeEvent(event: SomeEvent): void { - // Displays : "My value is: A" (Even though three values are passed to `log.info`) - log.info('My value is: {}', myArray) -} -``` - -#### تسجيل إدخالات متعددة من مصفوفة موجودة - -Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. - -```typescript -let myArray = ['A', 'B', 'C'] - -export function handleSomeEvent(event: SomeEvent): void { - // Displays : "My first value is: A, second value is: B, third value is: C" - log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) -} -``` - -##### تسجيل إدخال محدد من مصفوفة موجودة - -To display a specific value in the array, the indexed value must be provided. 
- -```typescript -export function handleSomeEvent(event: SomeEvent): void { - // Displays : "My third value is C" - log.info('My third value is: {}', [myArray[2]]) -} -``` - -##### تسجيل معلومات الحدث - -The example below logs the block number, block hash and transaction hash from an event: - -```typescript -import { log } from '@graphprotocol/graph-ts' - -export function handleSomeEvent(event: SomeEvent): void { - log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ - event.block.number.toString(), // "47596000" - event.block.hash.toHexString(), // "0x..." - event.transaction.hash.toHexString(), // "0x..." - ]) -} -``` - -### IPFS API - -```typescript -'import { ipfs } from '@graphprotocol/graph-ts -``` - -Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. - -Given an IPFS hash or path, reading a file from IPFS is done as follows: - -```typescript -// Put this inside an event handler in the mapping -let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' -let data = ipfs.cat(hash) - -// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` -// that include files in directories are also supported -let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' -let data = ipfs.cat(path) -``` - -**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. - -It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: - -```typescript -import { JSONValue, Value } from '@graphprotocol/graph-ts' - -export function processItem(value: JSONValue, userData: Value): void { - // See the JSONValue documentation for details on dealing - // with JSON values - let obj = value.toObject() - let id = obj.get('id') - let title = obj.get('title') - - if (!id || !title) { - return - } - - // Callbacks can also created entities - let newItem = new Item(id) - newItem.title = title.toString() - newitem.parent = userData.toString() // Set parent to "parentId" - newitem.save() -} - -// Put this inside an event handler in the mapping -ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) - -// Alternatively, use `ipfs.mapJSON` -ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) -``` - -The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. - -On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. - -### Crypto API - -```typescript -'import { crypto } from '@graphprotocol/graph-ts -``` - -The `crypto` API makes a cryptographic functions available for use in mappings. 
Right now, there is only one: - -- `crypto.keccak256(input: ByteArray): ByteArray` - -### JSON API - -```typescript -'import { json, JSONValueKind } from '@graphprotocol/graph-ts -``` - -JSON data can be parsed using the `json` API: - -- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence -- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed -- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` -- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed - -The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: - -```typescript -let value = json.fromBytes(...) -if (value.kind == JSONValueKind.BOOL) { - ... 
-} -``` - -In addition, there is a method to check if the value is `null`: - -- `value.isNull(): boolean` - -When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: - -- `value.toBool(): boolean` -- `value.toI64(): i64` -- `value.toF64(): f64` -- `value.toBigInt(): BigInt` -- `value.toString(): string` -- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) - -### مرجع تحويلات الأنواع - -| Source(s) | Destination | Conversion function | -| -------------------- | -------------------- | ---------------------------- | -| Address | Bytes | none | -| Address | String | s.toHexString() | -| BigDecimal | String | s.toString() | -| BigInt | BigDecimal | s.toBigDecimal() | -| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | -| BigInt | String (unicode) | s.toString() | -| BigInt | i32 | s.toI32() | -| Boolean | Boolean | none | -| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | -| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | -| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | -| Bytes | String (unicode) | s.toString() | -| Bytes | String (base58) | s.toBase58() | -| Bytes | i32 | s.toI32() | -| Bytes | u32 | s.toU32() | -| Bytes | JSON | json.fromBytes(s) | -| int8 | i32 | none | -| int32 | i32 | none | -| int32 | BigInt | BigInt.fromI32(s) | -| uint24 | i32 | none | -| int64 - int256 | BigInt | none | -| uint32 - uint256 | BigInt | none | -| JSON | boolean | s.toBool() | -| JSON | i64 | s.toI64() | -| JSON | u64 | s.toU64() | -| JSON | f64 | s.toF64() | -| JSON | BigInt | s.toBigInt() | -| JSON | string | s.toString() | -| JSON | Array | s.toArray() | -| JSON | Object | s.toObject() | -| String | Address | Address.fromString(s) | -| Bytes | Address | Address.fromBytes(s) | -| String | BigInt | BigInt.fromString(s) | -| String | BigDecimal | BigDecimal.fromString(s) | -| String (hexadecimal) | Bytes | 
ByteArray.fromHexString(s) | -| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | - -### البيانات الوصفية لمصدر البيانات - -You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: - -- `dataSource.address(): Address` -- `dataSource.network(): string` -- `dataSource.context(): DataSourceContext` - -### الكيان و DataSourceContext - -The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: - -- `setString(key: string, value: string): void` -- `setI32(key: string, value: i32): void` -- `setBigInt(key: string, value: BigInt): void` -- `setBytes(key: string, value: Bytes): void` -- `setBoolean(key: string, value: bool): void` -- `setBigDecimal(key, value: BigDecimal): void` -- `getString(key: string): string` -- `getI32(key: string): i32` -- `getBigInt(key: string): BigInt` -- `getBytes(key: string): Bytes` -- `getBoolean(key: string): boolean` -- `getBigDecimal(key: string): BigDecimal` - -### DataSourceContext in Manifest - -The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
- -Here is a YAML example illustrating the usage of various types in the `context` section: - -```yaml -dataSources: - - kind: ethereum/contract - name: ContractName - network: mainnet - context: - bool_example: - type: Bool - data: true - string_example: - type: String - data: 'hello' - int_example: - type: Int - data: 42 - int8_example: - type: Int8 - data: 127 - big_decimal_example: - type: BigDecimal - data: '10.99' - bytes_example: - type: Bytes - data: '0x68656c6c6f' - list_example: - type: List - data: - - type: Int - data: 1 - - type: Int - data: 2 - - type: Int - data: 3 - big_int_example: - type: BigInt - data: '1000000000000000000000000' -``` - -- `Bool`: Specifies a Boolean value (`true` or `false`). -- `String`: Specifies a String value. -- `Int`: Specifies a 32-bit integer. -- `Int8`: Specifies an 8-bit integer. -- `BigDecimal`: Specifies a decimal number. Must be quoted. -- `Bytes`: Specifies a hexadecimal string. -- `List`: Specifies a list of items. Each item needs to specify its type and data. -- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. - -This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx deleted file mode 100644 index d46783a1f7e3..000000000000 --- a/website/pages/ar/developing/substreams-powered-subgraphs-faq.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Substreams-powered subgraphs FAQ ---- - -## What are Substreams? - -Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. 
More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/reference-and-specs/manifests#sink) their data anywhere. - -اذهب إلى [وثائق سبستريمز](/substreams) للتعرف على المزيد حول سبستريمز. - -## ما هي الغرافات الفرعية المدعومة بسبستريمز؟ - -[الغرافات الفرعية المدعومة بسبستريمز](/cookbook/substreams-powered-subgraphs/) تجمع بين قوة سبستريمز وقابلية الاستعلام للغرافات الفرعية. عند نشر غراف فرعي مدعوم بواسطة سبستريمز، يمكن أن تنتج البيانات التي تم إنتاجها بواسطة تحويلات سبستريمز، تغييرات في الكيانات (https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs)، والتي تكون متوافقة مع كيانات الغرافات الفرعية. - -إذا كنت على دراية بتطوير الغراف الفرعي، فإن الغرافات الفرعية المدعومة بواسطة سبستريمز يمكن استعلامها بنفس الطريقة كما لو أنها تم إنتاجها بواسطة طبقة تحويل لغة أسيمبلي اسكريبت، بالإضافة إلى جميع فوائد الغراف الفرعي، مثل توفير واجهة برمجة تطبيقات ديناميكية ومرنة للغة استعلام الغراف. - -## كيف تختلف الغرافات الفرعية التي تعمل بسبستريمز عن الغرافات الفرعية؟ - -الغرافات الفرعية تتكون من مصادر البيانات التي تحدد الأحداث على السلسلة وكيف ينبغي تحويل تلك الأحداث من خلال معالجات مكتوبة بـلغة أسمبلي اسكريبت. يتم معالجة هذه الأحداث بتسلسل، استناداً إلى ترتيب حدوث الأحداث على السلسلة. - -على نقيض ذلك، تحتوي الغرافات الفرعية المدعومة بسبستريمز على مصدر بيانات واحد يشير إلى حزمة سبستريمز، والتي يتم معالجتها بواسطة نقطة الغراف. تتيح سبستريمز الوصول إلى بيانات إضافية على السلسلة مقارنةً بالغرافات الفرعية التقليدية، ويمكن أيضاً للعمليات الموازية الضخمة أن توفر أوقات معالجة أسرع بكثير. 
- -## ما هي فوائد استخدام الغرافات الفرعية المدعومة بسبستريمز؟ - -Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/documentation/develop/manifest-modules) to output to different [sinks](https://substreams.streamingfast.io/reference-and-specs/manifests#sink) such as PostgreSQL, MongoDB, and Kafka. - -## ماهي فوائد سبستريمز؟ - -There are many benefits to using Substreams, including: - -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. - -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). - -- التوجيه لأي مكان: يمكنك توجيه بياناتك لأي مكان ترغب فيه: بوستجريسكيو، مونغو دي بي، كافكا، الغرافات الفرعية، الملفات المسطحة، جداول جوجل. - -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. - -- الوصول إلى بيانات إضافية غير متاحة كجزء من إجراء الإستدعاء عن بعد للترميز الكائني لجافاسكريبت - -- All the benefits of the Firehose. - -## What is the Firehose? - -تم تطوير فايرهوز بواسطة [StreamingFast] (https://www.streamingfast.io/) وهو طبقة استخراج بيانات سلاسل الكتل مصممة من الصفر لمعالجة كامل تاريخ سلاسل الكتل بسرعات لم يشهدها من قبل. يوفر نهجاً قائماً على الملفات وأولوية-التدفق، وهو مكون أساسي في مجموعة تقنيات ستريمنج فاست مفتوحة المصدر والأساس لسبستريمز. - -انتقل إلى [الوثائق](https://firehose.streamingfast.io/) لمعرفة المزيد حول فايرهوز. - -## What are the benefits of the Firehose? 
- -There are many benefits to using Firehose, including: - -- أقل تأخير وعدم الاستقصاء: بطريقة قائمة على أولوية-التدفق، تم تصميم نقاط فايرهوز للتسابق لدفع بيانات الكتلة أولاً. - -- Prevents downtimes: Designed from the ground up for High Availability. - -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. - -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. - -- يستفيد من الملفات المسطحة: يتم استخراج بيانات سلسلة الكتل إلى ملفات مسطحة، وهي أرخص وأكثر موارد الحوسبة تحسيناً. - -## أين يمكن للمطورين الوصول إلى مزيد من المعلومات حول الغرافات الفرعية المدعومة بسبستريمز و سبستريمز؟ - -The [Substreams documentation](/substreams) will teach you how to build Substreams modules. - -ستوضح لك [وثائق الغرافات الفرعية المدعومة بواسطة سبستريمز](/cookbook/substreams-powered-subgraphs/) كيفية تجميعها للنشر على الغراف. - -The [latest Substreams Codegen tool](https://streamingfastio.medium.com/substreams-codegen-no-code-tool-to-bootstrap-your-project-a11efe0378c6) will allow you to bootstrap a Substreams project without any code. - -## What is the role of Rust modules in Substreams? - -تعتبر وحدات رست مكافئة لمعينات أسمبلي اسكريبت في الغرافات الفرعية. يتم ترجمتها إلى ويب أسيمبلي بنفس الطريقة، ولكن النموذج البرمجي يسمح بالتنفيذ الموازي. تحدد وحدات رست نوع التحويلات والتجميعات التي ترغب في تطبيقها على بيانات سلاسل الكتل الخام. - -See [modules documentation](https://substreams.streamingfast.io/documentation/develop/manifest-modules) for details. - -## What makes Substreams composable? - -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. - -على سبيل المثال، يمكن لأحمد بناء وحدة أسعار اسواق الصرف اللامركزية، ويمكن لإبراهيم استخدامها لبناء مجمِّع حجم للتوكن المهتم بها، ويمكن لآدم دمج أربع وحدات أسعار ديكس فردية لإنشاء مورد أسعار. 
سيقوم طلب واحد من سبستريمز بتجميع جميع هذه الوحدات الفردية، وربطها معًا لتقديم تدفق بيانات أكثر تطوراً ودقة. يمكن استخدام هذا التدفق لملءغراف فرعي ويمكن الاستعلام عنه من قبل المستخدمين. - -## كيف يمكنك إنشاء ونشر غراف فرعي مدعوم بسبستريمز؟ - -بعد [تعريف](/cookbook/substreams-powered-subgraphs/) الغراف الفرعي مدعوم بالسبستريمز، يمكنك استخدام واجهة سطر الأوامر للغراف لنشره في [استوديو الغراف الفرعي](https://thegraph.com/studio/). - -## أين يمكنني العثور على أمثلة على سبستريمز والغرافات الفرعية المدعومة بسبستريمز؟ - -يمكنك زيارة [جيت هب](https://github.com/pinax-network/awesome-substreams) للعثور على أمثلة للسبستريمز والغرافات الفرعية المدعومة بسبستريمز. - -## ماذا تعني السبستريمز والغرافات الفرعية المدعومة بسبستريمز بالنسبة لشبكة الغراف؟ - -إن التكامل مع سبستريمز والغرافات الفرعية المدعومة بسبستريمز واعدة بالعديد من الفوائد، بما في ذلك عمليات فهرسة عالية الأداء وقابلية أكبر للتركيبية من خلال استخدام وحدات المجتمع والبناء عليها. diff --git a/website/pages/ar/developing/supported-networks.mdx b/website/pages/ar/developing/supported-networks.mdx deleted file mode 100644 index c2e7677ae4fb..000000000000 --- a/website/pages/ar/developing/supported-networks.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: الشبكات المدعومة ---- - -import { getStaticPropsForSupportedNetworks } from '@/src/buildGetStaticProps' -import { SupportedNetworksTable } from '@/src/supportedNetworks' - -export const getStaticProps = getStaticPropsForSupportedNetworks(__filename) - - - -\* Baseline network support provided by the [upgrade Indexer](https://thegraph.com/blog/upgrade-indexer/). -\*\* Integration with Graph Node: `evm`, `near`, `cosmos`, `osmosis` and `ar` have native handler and type support in Graph Node. Chains which are Firehose- and Substreams-compatible can leverage the generalised [Substreams-powered subgraph](/cookbook/substreams-powered-subgraphs) integration (this includes `evm` and `near` networks). 
⁠ Supports deployment of [Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs). - -- Subgraph Studio relies on the stability and reliability of the underlying technologies, for example JSON-RPC, Firehose and Substreams endpoints. -- Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. -- If a subgraph was published via the CLI and picked up by an Indexer, it could technically be queried even without support, and efforts are underway to further streamline integration of new networks. -- For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). - -## Running Graph Node locally - -If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. - -Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. Additionally, Graph Node can support Substreams-powered subgraphs for any network with Substreams support. diff --git a/website/pages/ar/developing/unit-testing-framework.mdx b/website/pages/ar/developing/unit-testing-framework.mdx deleted file mode 100644 index d123dc0f994b..000000000000 --- a/website/pages/ar/developing/unit-testing-framework.mdx +++ /dev/null @@ -1,1402 +0,0 @@ ---- -title: اختبار وحدة Framework ---- - -Learn how to use Matchstick, a unit testing framework developed by [LimeChain](https://limechain.tech/). Matchstick enables subgraph developers to test their mapping logic in a sandboxed environment and sucessfully deploy their subgraphs. 
- -## Benefits of Using Matchstick - -- It's written in Rust and optimized for high performance. -- It gives you access to developer features, including the ability to mock contract calls, make assertions about the store state, monitor subgraph failures, check test performance, and many more. - -## Getting Started - -### Install Dependencies - -In order to use the test helper methods and run tests, you need to install the following dependencies: - -```sh -yarn add --dev matchstick-as -``` - -### Install PostgreSQL - -`graph-node` depends on PostgreSQL, so if you don't already have it, then you will need to install it. - -> Note: It's highly recommended to use the commands below to avoid unexpected errors. - -#### Using MacOS - -Installation command: - -```sh -brew install postgresql -``` - -Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` - -```sh -ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib -``` - -#### Using Linux - -Installation command (depends on your distro): - -```sh -sudo apt install postgresql -``` - -### Using WSL (Windows Subsystem for Linux) - -You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like - -``` -static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = -``` - -or - -``` -/node_modules/gluegun/build/index.js:13 throw up; -``` - -Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! 
Then, make sure you have **libpq** installed, you can do that by running - -``` -sudo apt-get install libpq-dev -``` - -And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as - -```json -{ - "name": "demo-subgraph", - "version": "0.1.0", - "scripts": { - "test": "graph test", - ... - }, - "dependencies": { - "@graphprotocol/graph-cli": "^0.56.0", - "@graphprotocol/graph-ts": "^0.31.0", - "matchstick-as": "^0.6.0" - } -} -``` - -### Using Matchstick - -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). - -### CLI options - -This will run all tests in the test folder: - -```sh -graph test -``` - -This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: - -```sh -graph test gravity -``` - -This will run only that specific test file: - -```sh -graph test path/to/file.test.ts -``` - -**Options:** - -```sh --c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. 
--h, --help Show usage information --l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) --r, --recompile Forces tests to be recompiled --v, --version Choose the version of the rust binary that you want to be downloaded/used -``` - -### Docker - -From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. - -❗ `graph test -d` forces `docker run` to run with flag `-t`. This must be removed to run inside non-interactive environments (like GitHub CI). - -❗ If you have previously ran `graph test` you may encounter the following error during docker build: - -```sh - error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied -``` - -In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` - -### Configuration - -Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: - -```yaml -testsFolder: path/to/tests -libsFolder: path/to/libs -manifestPath: path/to/subgraph.yaml -``` - -### Demo subgraph - -You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) - -### Video tutorials - -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) - -## Tests structure - -_**IMPORTANT: The test structure described below depens on `matchstick-as` version >=0.5.0**_ - -### describe() - 
-`describe(name: String , () => {})` - Defines a test group. - -**_Notes:_** - -- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ - -Example: - -```typescript -import { describe, test } from "matchstick-as/assembly/index" -import { handleNewGravatar } from "../../src/gravity" - -describe("handleNewGravatar()", () => { - test("Should create a new Gravatar entity", () => { - ... - }) -}) -``` - -Nested `describe()` example: - -```typescript -import { describe, test } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar } from "../../src/gravity" - -describe("handleUpdatedGravatar()", () => { - describe("When entity exists", () => { - test("updates the entity", () => { - ... - }) - }) - - describe("When entity does not exists", () => { - test("it creates a new entity", () => { - ... - }) - }) -}) -``` - ---- - -### test() - -`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. - -Example: - -```typescript -import { describe, test } from "matchstick-as/assembly/index" -import { handleNewGravatar } from "../../src/gravity" - -describe("handleNewGravatar()", () => { - test("Should create a new Entity", () => { - ... - }) -}) -``` - -or - -```typescript -test("handleNewGravatar() should create a new entity", () => { - ... -}) - - -``` - ---- - -### beforeAll() - -Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. - -أمثلة: - -Code inside `beforeAll` will execute once before _all_ tests in the file. 
- -```typescript -import { describe, test, beforeAll } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" -import { Gravatar } from "../../generated/schema" - -beforeAll(() => { - let gravatar = new Gravatar("0x0") - gravatar.displayName = “First Gravatar” - gravatar.save() - ... -}) - -describe("When the entity does not exist", () => { - test("it should create a new Gravatar with id 0x1", () => { - ... - }) -}) - -describe("When entity already exists", () => { - test("it should update the Gravatar with id 0x0", () => { - ... - }) -}) -``` - -Code inside `beforeAll` will execute once before all tests in the first describe block - -```typescript -import { describe, test, beforeAll } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" -import { Gravatar } from "../../generated/schema" - -describe("handleUpdatedGravatar()", () => { - beforeAll(() => { - let gravatar = new Gravatar("0x0") - gravatar.displayName = “First Gravatar” - gravatar.save() - ... - }) - - test("updates Gravatar with id 0x0", () => { - ... - }) - - test("creates new Gravatar with id 0x1", () => { - ... - }) -}) -``` - ---- - -### afterAll() - -Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. - -Example: - -Code inside `afterAll` will execute once after _all_ tests in the file. - -```typescript -import { describe, test, afterAll } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" -import { store } from "@graphprotocol/graph-ts" - -afterAll(() => { - store.remove("Gravatar", "0x0") - ... -}) - -describe("handleNewGravatar, () => { - test("creates Gravatar with id 0x0", () => { - ... - }) -}) - -describe("handleUpdatedGravatar", () => { - test("updates Gravatar with id 0x0", () => { - ... 
- }) -}) -``` - -Code inside `afterAll` will execute once after all tests in the first describe block - -```typescript -import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" - -describe("handleNewGravatar", () => { - afterAll(() => { - store.remove("Gravatar", "0x1") - ... - }) - - test("It creates a new entity with Id 0x0", () => { - ... - }) - - test("It creates a new entity with Id 0x1", () => { - ... - }) -}) - -describe("handleUpdatedGravatar", () => { - test("updates Gravatar with id 0x0", () => { - ... - }) -}) -``` - ---- - -### beforeEach() - -Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. - -Examples: Code inside `beforeEach` will execute before each tests. - -```typescript -import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" -import { handleNewGravatars } from "./utils" - -beforeEach(() => { - clearStore() // <-- clear the store before each test in the file -}) - -describe("handleNewGravatars, () => { - test("A test that requires a clean store", () => { - ... - }) - - test("Second that requires a clean store", () => { - ... - }) -}) - - ... 
-``` - -Code inside `beforeEach` will execute only before each test in the that describe - -```typescript -import { describe, test, beforeEach } from 'matchstick-as/assembly/index' -import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' - -describe('handleUpdatedGravatars', () => { - beforeEach(() => { - let gravatar = new Gravatar('0x0') - gravatar.displayName = 'First Gravatar' - gravatar.imageUrl = '' - gravatar.save() - }) - - test('Upates the displayName', () => { - assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - - // code that should update the displayName to 1st Gravatar - - assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') - store.remove('Gravatar', '0x0') - }) - - test('Updates the imageUrl', () => { - assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 - - assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') - store.remove('Gravatar', '0x0') - }) -}) -``` - ---- - -### afterEach() - -Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. - -أمثلة: - -Code inside `afterEach` will execute after every test. - -```typescript -import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" - -beforeEach(() => { - let gravatar = new Gravatar("0x0") - gravatar.displayName = “First Gravatar” - gravatar.save() -}) - -afterEach(() => { - store.remove("Gravatar", "0x0") -}) - -describe("handleNewGravatar", () => { - ... 
-}) - -describe("handleUpdatedGravatar", () => { - test("Upates the displayName", () => { - assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - - // code that should update the displayName to 1st Gravatar - - assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") - }) - - test("Updates the imageUrl", () => { - assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 - - assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") - }) -}) -``` - -Code inside `afterEach` will execute after each test in that describe - -```typescript -import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" - -describe("handleNewGravatar", () => { - ... -}) - -describe("handleUpdatedGravatar", () => { - beforeEach(() => { - let gravatar = new Gravatar("0x0") - gravatar.displayName = "First Gravatar" - gravatar.imageUrl = "" - gravatar.save() - }) - - afterEach(() => { - store.remove("Gravatar", "0x0") - }) - - test("Upates the displayName", () => { - assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - - // code that should update the displayName to 1st Gravatar - - assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") - }) - - test("Updates the imageUrl", () => { - assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 - - assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") - }) -}) -``` - -## Asserts - -```typescript -fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) - -equals(expected: ethereum.Value, actual: ethereum.Value) - -notInStore(entityType: string, id: string) - -addressEquals(address1: Address, address2: Address) - -bytesEquals(bytes1: 
Bytes, bytes2: Bytes) - -i32Equals(number1: i32, number2: i32) - -bigIntEquals(bigInt1: BigInt, bigInt2: BigInt) - -booleanEquals(bool1: boolean, bool2: boolean) - -stringEquals(string1: string, string2: string) - -arrayEquals(array1: Array, array2: Array) - -tupleEquals(tuple1: ethereum.Tuple, tuple2: ethereum.Tuple) - -assertTrue(value: boolean) - -assertNull(value: T) - -assertNotNull(value: T) - -entityCount(entityType: string, expectedCount: i32) -``` - -As of version 0.6.0, asserts support custom error messages as well - -```typescript -assert.fieldEquals('Gravatar', '0x123', 'id', '0x123', 'Id should be 0x123') -assert.equals(ethereum.Value.fromI32(1), ethereum.Value.fromI32(1), 'Value should equal 1') -assert.notInStore('Gravatar', '0x124', 'Gravatar should not be in store') -assert.addressEquals(Address.zero(), Address.zero(), 'Address should be zero') -assert.bytesEquals(Bytes.fromUTF8('0x123'), Bytes.fromUTF8('0x123'), 'Bytes should be equal') -assert.i32Equals(2, 2, 'I32 should equal 2') -assert.bigIntEquals(BigInt.fromI32(1), BigInt.fromI32(1), 'BigInt should equal 1') -assert.booleanEquals(true, true, 'Boolean should be true') -assert.stringEquals('1', '1', 'String should equal 1') -assert.arrayEquals([ethereum.Value.fromI32(1)], [ethereum.Value.fromI32(1)], 'Arrays should be equal') -assert.tupleEquals( - changetype([ethereum.Value.fromI32(1)]), - changetype([ethereum.Value.fromI32(1)]), - 'Tuples should be equal', -) -assert.assertTrue(true, 'Should be true') -assert.assertNull(null, 'Should be null') -assert.assertNotNull('not null', 'Should be not null') -assert.entityCount('Gravatar', 1, 'There should be 2 gravatars') -assert.dataSourceCount('GraphTokenLockWallet', 1, 'GraphTokenLockWallet template should have one data source') -assert.dataSourceExists( - 'GraphTokenLockWallet', - Address.zero().toHexString(), - 'GraphTokenLockWallet should have a data source for zero address', -) -``` - -## Write a Unit Test - -Let's see how a simple unit test 
would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). - -Assuming we have the following handler function (along with two helper functions to make our life easier): - -```typescript -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id.toHex()) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() -} - -export function handleNewGravatars(events: NewGravatar[]): void { - events.forEach((event) => { - handleNewGravatar(event) - }) -} - -export function createNewGravatarEvent( - id: i32, - ownerAddress: string, - displayName: string, - imageUrl: string, -): NewGravatar { - let mockEvent = newMockEvent() - let newGravatarEvent = new NewGravatar( - mockEvent.address, - mockEvent.logIndex, - mockEvent.transactionLogIndex, - mockEvent.logType, - mockEvent.block, - mockEvent.transaction, - mockEvent.parameters, - ) - newGravatarEvent.parameters = new Array() - let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) - let addressParam = new ethereum.EventParam( - 'ownderAddress', - ethereum.Value.fromAddress(Address.fromString(ownerAddress)), - ) - let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) - let imageUrlParam = new ethereum.EventParam('imageUrl', ethereum.Value.fromString(imageUrl)) - - newGravatarEvent.parameters.push(idParam) - newGravatarEvent.parameters.push(addressParam) - newGravatarEvent.parameters.push(displayNameParam) - newGravatarEvent.parameters.push(imageUrlParam) - - return newGravatarEvent -} -``` - -We first have to create a test file in our project. 
This is an example of how that might look like: - -```typescript -import { clearStore, test, assert } from 'matchstick-as/assembly/index' -import { Gravatar } from '../../generated/schema' -import { NewGravatar } from '../../generated/Gravity/Gravity' -import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' - -test('Can call mappings with custom events', () => { - // Create a test entity and save it in the store as initial state (optional) - let gravatar = new Gravatar('gravatarId0') - gravatar.save() - - // Create mock events - let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - - // Call mapping functions passing the events we just created - handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - - // Assert the state of the store - assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') - assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') - assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - - // Clear the store in order to start the next test off on a clean slate - clearStore() -}) - -test('Next test', () => { - //... -}) -``` - -That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. 
The rest of it is pretty straightforward - here's what happens: - -- We're setting up our initial state and adding one custom Gravatar entity; -- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; -- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; -- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; -- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. - -There we go - we've created our first test! 👏 - -Now in order to run our tests you simply need to run the following in your subgraph root folder: - -`graph test Gravity` - -And if all goes well you should be greeted with the following: - -![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) - -## Common test scenarios - -### Hydrating the store with a certain state - -Users are able to hydrate the store with a known set of entities. 
Here's an example to initialise the store with a Gravatar entity: - -```typescript -let gravatar = new Gravatar('entryId') -gravatar.save() -``` - -### Calling a mapping function with an event - -A user can create a custom event and pass it to a mapping function that is bound to the store: - -```typescript -import { store } from 'matchstick-as/assembly/store' -import { NewGravatar } from '../../generated/Gravity/Gravity' -import { handleNewGravatars, createNewGravatarEvent } from './mapping' - -let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - -handleNewGravatar(newGravatarEvent) -``` - -### Calling all of the mappings with event fixtures - -Users can call the mappings with test fixtures. - -```typescript -import { NewGravatar } from '../../generated/Gravity/Gravity' -import { store } from 'matchstick-as/assembly/store' -import { handleNewGravatars, createNewGravatarEvent } from './mapping' - -let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - -let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - -handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) -``` - -``` -export function handleNewGravatars(events: NewGravatar[]): void { - events.forEach(event => { - handleNewGravatar(event); - }); -} -``` - -### Mocking contract calls - -Users can mock contract calls: - -```typescript -import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' -import { Gravity } from '../../generated/Gravity/Gravity' -import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' - -let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') -let expectedResult = Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947') -let bigIntParam = BigInt.fromString('1234') -createMockedFunction(contractAddress, 
'gravatarToOwner', 'gravatarToOwner(uint256):(address)') - .withArgs([ethereum.Value.fromSignedBigInt(bigIntParam)]) - .returns([ethereum.Value.fromAddress(Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947'))]) - -let gravity = Gravity.bind(contractAddress) -let result = gravity.gravatarToOwner(bigIntParam) - -assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) -``` - -As demonstrated, in order to mock a contract call and hardcore a return value, the user must provide a contract address, function name, function signature, an array of arguments, and of course - the return value. - -Users can also mock function reverts: - -```typescript -let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') -createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(string,string)') - .withArgs([ethereum.Value.fromAddress(contractAddress)]) - .reverts() -``` - -### Mocking IPFS files (from matchstick 0.4.1) - -Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. 
- -NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: - -`.test.ts` file: - -```typescript -import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' -import { ipfs } from '@graphprotocol/graph-ts' -import { gravatarFromIpfs } from './utils' - -// Export ipfs.map() callback in order for matchstck to detect it -export { processGravatar } from './utils' - -test('ipfs.cat', () => { - mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json') - - assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) - - gravatarFromIpfs() - - assert.entityCount(GRAVATAR_ENTITY_TYPE, 1) - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg') - - clearStore() -}) - -test('ipfs.map', () => { - mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json') - - assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) - - ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json']) - - assert.entityCount(GRAVATAR_ENTITY_TYPE, 3) - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1') - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2') - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3') -}) -``` - -`utils.ts` file: - -```typescript -import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" -import { Gravatar } from "../../generated/schema" - -... 
- -// ipfs.map callback -export function processGravatar(value: JSONValue, userData: Value): void { - // See the JSONValue documentation for details on dealing - // with JSON values - let obj = value.toObject() - let id = obj.get('id') - - if (!id) { - return - } - - // Callbacks can also created entities - let gravatar = new Gravatar(id.toString()) - gravatar.displayName = userData.toString() + id.toString() - gravatar.save() -} - -// function that calls ipfs.cat -export function gravatarFromIpfs(): void { - let rawData = ipfs.cat("ipfsCatfileHash") - - if (!rawData) { - return - } - - let jsonData = json.fromBytes(rawData as Bytes).toObject() - - let id = jsonData.get('id') - let url = jsonData.get("imageUrl") - - if (!id || !url) { - return - } - - let gravatar = new Gravatar(id.toString()) - gravatar.imageUrl = url.toString() - gravatar.save() -} -``` - -### Asserting the state of the store - -Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, a name of a field on that Entity, and the expected value of the field. Here's a quick example: - -```typescript -import { assert } from 'matchstick-as/assembly/index' -import { Gravatar } from '../generated/schema' - -let gravatar = new Gravatar('gravatarId0') -gravatar.save() - -assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') -``` - -Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. - -### Interacting with Event metadata - -Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. 
The following example shows how you can read/write to those fields on the Event object: - -```typescript -// Read -let logType = newGravatarEvent.logType - -// Write -let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' -newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) -``` - -### Asserting variable equality - -```typescript -assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); -``` - -### Asserting that an Entity is **not** in the store - -Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message. Here's a quick example of how to use this functionality: - -```typescript -assert.notInStore('Gravatar', '23') -``` - -### Printing the whole store, or single entities from it (for debug purposes) - -You can print the whole store to the console using this helper function: - -```typescript -import { logStore } from 'matchstick-as/assembly/store' - -logStore() -``` - -As of version 0.6.0, `logStore` no longer prints derived fields, instead users can use the new `logEntity` function. Of course `logEntity` can be used to print any entity, not just ones that have derived fields. `logEntity` takes the entity type, entity id and a `showRelated` flag to indicate if users want to print the related derived entities. - -``` -import { logEntity } from 'matchstick-as/assembly/store' - - -logEntity("Gravatar", 23, true) -``` - -### Expected failure - -Users can have expected test failures, using the shouldFail flag on the test() functions: - -```typescript -test( - 'Should throw an error', - () => { - throw new Error() - }, - true, -) -``` - -If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash. 
- -### Logging - -Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: - -```typescript -import { test } from "matchstick-as/assembly/index"; -import { log } from "matchstick-as/assembly/log"; - -test("Success", () => { - log.success("Success!". []); -}); -test("Error", () => { - log.error("Error :( ", []); -}); -test("Debug", () => { - log.debug("Debugging...", []); -}); -test("Info", () => { - log.info("Info!", []); -}); -test("Warning", () => { - log.warning("Warning!", []); -}); -``` - -Users can also simulate a critical failure, like so: - -```typescript -test('Blow everything up', () => { - log.critical('Boom!') -}) -``` - -Logging critical errors will stop the execution of the tests and blow everything up. After all - we want to make sure you're code doesn't have critical logs in deployment, and you should notice right away if that were to happen. - -### Testing derived fields - -Testing derived fields is a feature which allows users to set a field on a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. - -Before version `0.6.0` it was possible to get the derived entities by accessing them as entity fields/properties, like so: - -```typescript -let entity = ExampleEntity.load('id') -let derivedEntity = entity.derived_entity -``` - -As of version `0.6.0`, this is done by using the `loadRelated` function of graph-node, the derived entities can be accessed the same way as in the handlers. - -```typescript -test('Derived fields example test', () => { - let mainAccount = GraphAccount.load('12')! - - assert.assertNull(mainAccount.get('nameSignalTransactions')) - assert.assertNull(mainAccount.get('operatorOf')) - - let operatedAccount = GraphAccount.load('1')! 
- operatedAccount.operators = [mainAccount.id] - operatedAccount.save() - - mockNameSignalTransaction('1234', mainAccount.id) - mockNameSignalTransaction('2', mainAccount.id) - - mainAccount = GraphAccount.load('12')! - - assert.assertNull(mainAccount.get('nameSignalTransactions')) - assert.assertNull(mainAccount.get('operatorOf')) - - const nameSignalTransactions = mainAccount.nameSignalTransactions.load() - const operatorsOfMainAccount = mainAccount.operatorOf.load() - - assert.i32Equals(2, nameSignalTransactions.length) - assert.i32Equals(1, operatorsOfMainAccount.length) - - assert.stringEquals('1', operatorsOfMainAccount[0].id) - - mockNameSignalTransaction('2345', mainAccount.id) - - let nst = NameSignalTransaction.load('1234')! - nst.signer = '11' - nst.save() - - store.remove('NameSignalTransaction', '2') - - mainAccount = GraphAccount.load('12')! - assert.i32Equals(1, mainAccount.nameSignalTransactions.load().length) -}) -``` - -### Testing `loadInBlock` - -As of version `0.6.0`, users can test `loadInBlock` by using the `mockInBlockStore`, it allows mocking entities in the block cache. 
- -```typescript -import { afterAll, beforeAll, describe, mockInBlockStore, test } from 'matchstick-as' -import { Gravatar } from '../../generated/schema' - -describe('loadInBlock', () => { - beforeAll(() => { - mockInBlockStore('Gravatar', 'gravatarId0', gravatar) - }) - - afterAll(() => { - clearInBlockStore() - }) - - test('Can use entity.loadInBlock() to retrieve entity from cache store in the current block', () => { - let retrievedGravatar = Gravatar.loadInBlock('gravatarId0') - assert.stringEquals('gravatarId0', retrievedGravatar!.get('id')!.toString()) - }) - - test("Returns null when calling entity.loadInBlock() if an entity doesn't exist in the current block", () => { - let retrievedGravatar = Gravatar.loadInBlock('IDoNotExist') - assert.assertNull(retrievedGravatar) - }) -}) -``` - -### Testing dynamic data sources - -Testing dynamic data sources can be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). - -Example below: - -First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): - -```typescript -export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { - let tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString())!
- if (dataSource.network() == 'rinkeby') { - tokenLockWallet.tokenDestinationsApproved = true - } - let context = dataSource.context() - if (context.get('contextVal')!.toI32() > 0) { - tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32())) - } - tokenLockWallet.save() -} -``` - -And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: - -```typescript -import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' -import { BigInt, DataSourceContext, Value } from '@graphprotocol/graph-ts' - -import { handleApproveTokenDestinations } from '../../src/token-lock-wallet' -import { ApproveTokenDestinations } from '../../generated/templates/GraphTokenLockWallet/GraphTokenLockWallet' -import { TokenLockWallet } from '../../generated/schema' - -test('Data source simple mocking example', () => { - let addressString = '0xA16081F360e3847006dB660bae1c6d1b2e17eC2A' - let address = Address.fromString(addressString) - - let wallet = new TokenLockWallet(address.toHexString()) - wallet.save() - let context = new DataSourceContext() - context.set('contextVal', Value.fromI32(325)) - dataSourceMock.setReturnValues(addressString, 'rinkeby', context) - let event = changetype(newMockEvent()) - - assert.assertTrue(!wallet.tokenDestinationsApproved) - - handleApproveTokenDestinations(event) - - wallet = TokenLockWallet.load(address.toHexString())! - assert.assertTrue(wallet.tokenDestinationsApproved) - assert.bigIntEquals(wallet.tokensReleased, BigInt.fromI32(325)) - - dataSourceMock.resetValues() -}) -``` - -Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. 
- -### Testing dynamic data source creation - -As of version `0.6.0`, it is possible to test if a new data source has been created from a template. This feature supports both ethereum/contract and file/ipfs templates. There are four functions for this: - -- `assert.dataSourceCount(templateName, expectedCount)` can be used to assert the expected count of data sources from the specified template -- `assert.dataSourceExists(templateName, address/ipfsHash)` asserts that a data source with the specified identifier (could be a contract address or IPFS file hash) from a specified template was created -- `logDataSources(templateName)` prints all data sources from the specified template to the console for debugging purposes -- `readFile(path)` reads a JSON file that represents an IPFS file and returns the content as Bytes - -#### Testing `ethereum/contract` templates - -```typescript -test('ethereum/contract dataSource creation example', () => { - // Assert there are no dataSources created from GraphTokenLockWallet template - assert.dataSourceCount('GraphTokenLockWallet', 0) - - // Create a new GraphTokenLockWallet datasource with address 0xA16081F360e3847006dB660bae1c6d1b2e17eC2A - GraphTokenLockWallet.create(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2A')) - - // Assert the dataSource has been created - assert.dataSourceCount('GraphTokenLockWallet', 1) - - // Add a second dataSource with context - let context = new DataSourceContext() - context.set('contextVal', Value.fromI32(325)) - - GraphTokenLockWallet.createWithContext(Address.fromString('0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'), context) - - // Assert there are now 2 dataSources - assert.dataSourceCount('GraphTokenLockWallet', 2) - - // Assert that a dataSource with address "0xA16081F360e3847006dB660bae1c6d1b2e17eC2B" was created - // Keep in mind that `Address` type is transformed to lower case when decoded, so you have to pass the address as all lower case when asserting if it exists - 
assert.dataSourceExists('GraphTokenLockWallet', '0xA16081F360e3847006dB660bae1c6d1b2e17eC2B'.toLowerCase()) - - logDataSources('GraphTokenLockWallet') -}) -``` - -##### Example `logDataSource` output - -```bash -🛠 { - "0xa16081f360e3847006db660bae1c6d1b2e17ec2a": { - "kind": "ethereum/contract", - "name": "GraphTokenLockWallet", - "address": "0xa16081f360e3847006db660bae1c6d1b2e17ec2a", - "context": null - }, - "0xa16081f360e3847006db660bae1c6d1b2e17ec2b": { - "kind": "ethereum/contract", - "name": "GraphTokenLockWallet", - "address": "0xa16081f360e3847006db660bae1c6d1b2e17ec2b", - "context": { - "contextVal": { - "type": "Int", - "data": 325 - } - } - } -} -``` - -#### Testing `file/ipfs` templates - -Similarly to contract dynamic data sources, users can test file data sources and their handlers - -##### Example `subgraph.yaml` - -```yaml -... -templates: - - kind: file/ipfs - name: GraphTokenLockMetadata - network: mainnet - mapping: - kind: ethereum/events - apiVersion: 0.0.6 - language: wasm/assemblyscript - file: ./src/token-lock-wallet.ts - handler: handleMetadata - entities: - - TokenLockMetadata - abis: - - name: GraphTokenLockWallet - file: ./abis/GraphTokenLockWallet.json -``` - -##### Example `schema.graphql` - -```graphql -""" -Token Lock Wallets which hold locked GRT -""" -type TokenLockMetadata @entity { - "The address of the token lock wallet" - id: ID! - "Start time of the release schedule" - startTime: BigInt! - "End time of the release schedule" - endTime: BigInt! - "Number of periods between start time and end time" - periods: BigInt! - "Time when the releases start" - releaseStartTime: BigInt!
-} -``` - -##### Example `metadata.json` - -```json -{ - "startTime": 1, - "endTime": 1, - "periods": 1, - "releaseStartTime": 1 -} -``` - -##### Example handler - -```typescript -export function handleMetadata(content: Bytes): void { - // dataSource.stringParams() returns the File DataSource CID - // stringParam() will be mocked in the handler test - // for more info https://thegraph.com/docs/en/developing/creating-a-subgraph/#create-a-new-handler-to-process-files - let tokenMetadata = new TokenLockMetadata(dataSource.stringParam()) - const value = json.fromBytes(content).toObject() - - if (value) { - const startTime = value.get('startTime') - const endTime = value.get('endTime') - const periods = value.get('periods') - const releaseStartTime = value.get('releaseStartTime') - - if (startTime && endTime && periods && releaseStartTime) { - tokenMetadata.startTime = startTime.toBigInt() - tokenMetadata.endTime = endTime.toBigInt() - tokenMetadata.periods = periods.toBigInt() - tokenMetadata.releaseStartTime = releaseStartTime.toBigInt() - } - - tokenMetadata.save() - } -} -``` - -##### Example test - -```typescript -import { assert, test, dataSourceMock, readFile } from 'matchstick-as' -import { Address, BigInt, Bytes, DataSourceContext, ipfs, json, store, Value } from '@graphprotocol/graph-ts' - -import { handleMetadata } from '../../src/token-lock-wallet' -import { TokenLockMetadata } from '../../generated/schema' -import { GraphTokenLockMetadata } from '../../generated/templates' - -test('file/ipfs dataSource creation example', () => { - // Generate the dataSource CID from the ipfsHash + ipfs path file - // For example QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm/example.json - const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' - const CID = `${ipfshash}/example.json` - - // Create a new dataSource using the generated CID - GraphTokenLockMetadata.create(CID) - - // Assert the dataSource has been created - 
assert.dataSourceCount('GraphTokenLockMetadata', 1) - assert.dataSourceExists('GraphTokenLockMetadata', CID) - logDataSources('GraphTokenLockMetadata') - - // Now we have to mock the dataSource metadata and specifically dataSource.stringParam() - // dataSource.stringParams actually uses the value of dataSource.address(), so we will mock the address using dataSourceMock from matchstick-as - // First we will reset the values and then use dataSourceMock.setAddress() to set the CID - dataSourceMock.resetValues() - dataSourceMock.setAddress(CID) - - // Now we need to generate the Bytes to pass to the dataSource handler - // For this case we introduced a new function readFile, that reads a local json and returns the content as Bytes - const content = readFile(`path/to/metadata.json`) - handleMetadata(content) - - // Now we will test if a TokenLockMetadata was created - const metadata = TokenLockMetadata.load(CID) - - assert.bigIntEquals(metadata!.endTime, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.periods, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.releaseStartTime, BigInt.fromI32(1)) - assert.bigIntEquals(metadata!.startTime, BigInt.fromI32(1)) -}) -``` - -## Test Coverage - -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. - -The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. 
- -### المتطلبات الأساسية - -To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: - -#### Export your handlers - -In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: - -```typescript -import { handleNewGravatar } from '../../src/gravity' -``` - -In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: - -```typescript -export { handleNewGravatar } -``` - -### الاستخدام - -Once that's all set up, to run the test coverage tool, simply run: - -```sh -graph test -- -c -``` - -You could also add a custom `coverage` command to your `package.json` file, like so: - -```typescript - "scripts": { - /.../ - "coverage": "graph test -- -c" - }, -``` - -That will execute the coverage tool and you should see something like this in the terminal: - -```sh -$ graph test -c -Skipping download/install step because binary already exists at /Users/petko/work/demo-subgraph/node_modules/binary-install-raw/bin/0.4.0 - -___ ___ _ _ _ _ _ -| \/ | | | | | | | (_) | | -| . . | __ _| |_ ___| |__ ___| |_ _ ___| | __ -| |\/| |/ _` | __/ __| '_ \/ __| __| |/ __| |/ / -| | | | (_| | || (__| | | \__ \ |_| | (__| < -\_| |_/\__,_|\__\___|_| |_|___/\__|_|\___|_|\_\ - -Compiling... - -Running in coverage report mode. - ️ -Reading generated test modules... 🔎️ - -Generating coverage report 📝 - -Handlers for source 'Gravity': -Handler 'handleNewGravatar' is tested. -Handler 'handleUpdatedGravatar' is not tested. -Handler 'handleCreateGravatar' is tested. -Test coverage: 66.7% (2/3 handlers). - -Handlers for source 'GraphTokenLockWallet': -Handler 'handleTokensReleased' is not tested. -Handler 'handleTokensWithdrawn' is not tested. -Handler 'handleTokensRevoked' is not tested. 
-Handler 'handleManagerUpdated' is not tested. -Handler 'handleApproveTokenDestinations' is not tested. -Handler 'handleRevokeTokenDestinations' is not tested. -Test coverage: 0.0% (0/6 handlers). - -Global test coverage: 22.2% (2/9 handlers). -``` - -### Test run time duration in the log output - -The log output includes the test run duration. Here's an example: - -`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` - -## Common compiler errors - -> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined - -This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/graph-ts/api/#logging-api) - -> ERROR TS2554: Expected ? arguments, but got ?. -> -> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt); -> -> in ~lib/matchstick-as/assembly/defaults.ts(18,12) -> -> ERROR TS2554: Expected ? arguments, but got ?. -> -> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); -> -> in ~lib/matchstick-as/assembly/defaults.ts(24,12) - -The mismatch in arguments is caused by a mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. - -## مصادر إضافية - -For any additional support, check out this [demo subgraph repo using Matchstick](https://github.com/LimeChain/demo-subgraph#readme).
- -## Feedback - -If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. diff --git a/website/pages/ar/indexing/_meta.js b/website/pages/ar/indexing/_meta.js new file mode 100644 index 000000000000..de19e80d5f5a --- /dev/null +++ b/website/pages/ar/indexing/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../en/indexing/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/indexing/chain-integration-overview.mdx b/website/pages/ar/indexing/chain-integration-overview.mdx new file mode 100644 index 000000000000..e6b95ec0fc17 --- /dev/null +++ b/website/pages/ar/indexing/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: نظرة عامة حول عملية التكامل مع الشبكة +--- + +تم تصميم عملية تكامل قائمة على الحوكمة وبشفافية لفرق سلاسل الكتل التي تسعى للإندماج مع بروتوكول الغراف (https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). إنها عملية مكونة من 3 مراحل، كما هو ملخص أدناه. + +## المرحلة الأولى: التكامل التقني + +- Please visit [New Chain Integration](/indexing/new-chain-integration/) for information on `graph-node` support for new chains. +- تستهل الفرق عملية التكامل مع البروتوكول من خلال إنشاء موضوع في المنتدى هنا(https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (الفئة الفرعية "مصادر البيانات الجديدة" تحت قسم "الحوكمة واقتراحات تحسين الغراف"). استخدام قالب المنتدى الافتراضي إلزامي. + +## المرحلة الثانية: التحقق من صحة التكامل + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC, Firehose or Substreams endpoints. 
Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- مفهرسو الغراف يختبرون التكامل على شبكة إختبار الغراف. +- يقوم المطورون الأساسيون والمفهرسون بمراقبة استقرار، وأداء، وحتمية البيانات. + +## المرحلة الثالثة: التكامل مع الشبكة الرئيسية + +- يتم اقتراح التكامل مع الشبكة الرئيسية من قبل الفرق عن طريق تقديم اقتراح تحسين الغراف (GIP) واستهلال طلب سحب (PR) على مصفوفة دعم الميزات (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)(لمزيد من التفاصيل، يرجى زيارة الرابط). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +إذا بدت العملية مربكة، فلا تقلق! تلتزم مؤسسة الغراف بدعم المتكاملين من خلال تعزيز التعاون وتوفير المعلومات الجوهرية وتوجيههم خلال مراحل مختلفة، بما في ذلك توجيههم خلال عمليات الحوكمة مثل اقتراحات تحسين الغراف وطلبات السحب. إذا كان لديك أسئلة، فيرجى التواصل مع [info@thegraph.foundation](mailto:info@thegraph.foundation) أو من خلال ديسكورد (باستطاعتك التواصل مع بيدرو، عضو مؤسسة الغراف، أو IndexerDAO أو المطورين الأساسيين الآخرين). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## الأسئلة الشائعة + +### 1. كيف يتعلق هذا بـ مقترح تحسين الغراف "خدمات عالم البيانات" (https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)؟ + +هذه العملية مرتبطة بخدمة بيانات الغراف الفرعي، وهي مطبقة فقط على مصادر بيانات الغراف الفرعي الجديد. + +### 2. ماذا يحدث إذا تم دعم فايرهوز و سبستريمز بعد أن تم دعم الشبكة على الشبكة الرئيسية؟ + +هذا سيؤثر فقط على دعم البروتوكول لمكافآت الفهرسة على الغرافات الفرعية المدعومة من سبستريمز. 
تنفيذ الفايرهوز الجديد سيحتاج إلى الفحص على شبكة الاختبار، وفقًا للمنهجية الموضحة للمرحلة الثانية في هذا المقترح لتحسين الغراف. وعلى نحو مماثل، وعلى افتراض أن التنفيذ فعال وموثوق به، سيتتطالب إنشاء طلب سحب على [مصفوفة دعم الميزات] (https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) ("مصادر بيانات سبستريمز" ميزة للغراف الفرعي)، بالإضافة إلى مقترح جديد لتحسين الغراف، لدعم البروتوكول لمكافآت الفهرسة. يمكن لأي شخص إنشاء طلب السحب ومقترح تحسين الغراف؛ وسوف تساعد المؤسسة في الحصول على موافقة المجلس. + +### 3. How much time will the process of reaching full protocol support take? + +يُتوقع أن يستغرق الوصول إلى الشبكة الرئيسية عدة أسابيع، وذلك يعتمد على وقت تطوير التكامل، وما إذا كانت هناك حاجة إلى بحوث إضافية، واختبارات وإصلاحات الأخطاء، وكذلك توقيت عملية الحوكمة التي تتطلب ملاحظات المجتمع كما هو الحال دائمًا. + +يعتمد دعم البروتوكول لمكافآت الفهرسة على قدرة أصحاب الحصص في المضي قدماً في عمليات الفحص وجمع الملاحظات ومعالجة المساهمات في قاعدة الكود الأساسية، إذا كان ذلك قابلاً للتطبيق. هذا مرتبط مباشرة بنضج عملية التكامل ومدى استجابة فريق التكامل (والذي قد يكون أو قد لا يكون نفس الفريق المسؤول عن تنفيذ إجراء الإستدعاء عن بعد\الفايرهوز). المؤسسة هنا لمساعدة الدعم خلال العملية بأكملها. + +### 4. كيف سيتم التعامل مع الأولويات؟ + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. diff --git a/website/pages/ar/indexing/new-chain-integration.mdx b/website/pages/ar/indexing/new-chain-integration.mdx new file mode 100644 index 000000000000..90fd989fffce --- /dev/null +++ b/website/pages/ar/indexing/new-chain-integration.mdx @@ -0,0 +1,80 @@ +--- +title: New Chain Integration +--- + +Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. 
Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. If you are interested in a new integration, there are 2 integration strategies: + +1. **EVM JSON-RPC** +2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. + +> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains. + +## Integration Strategies + +### 1. EVM JSON-RPC + +If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. + +#### اختبار استدعاء إجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت لآلة الإيثريوم الافتراضية (EVM JSON-RPC) + +For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods: + +- `eth_getLogs` +- `eth_call` (for historical blocks, with EIP-1898 - requires archive node) +- `eth_getBlockByNumber` +- `eth_getBlockByHash` +- `net_version` +- `eth_getTransactionReceipt`، ضمن طلب دفعة استدعاء الإجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت +- `trace_filter` *(optionally required for Graph Node to support call handlers)* + +### 2. Firehose Integration + +[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. + +The primary method to integrate the Firehose into chains is to use an RPC polling strategy. 
Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. + +> NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. + +#### Specific Firehose Instrumentation for EVM (`geth`) chains + +For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. 
+ +![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png) + +> NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up. + +## EVM considerations - Difference between JSON-RPC & Firehose + +While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/subgraphs/cookbook/substreams-powered-subgraphs/) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. + +- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. + +> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) + +## تكوين عقدة الغراف + +Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. + +1. [استنسخ عقدة الغراف](https://github.com/graphprotocol/graph-node) + +2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL + + > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. + +3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ + +### Testing an EVM JSON-RPC by locally deploying a subgraph + +1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) +2. قم بإنشاء مثالًا بسيطًا للغراف الفرعي. بعض الخيارات المتاحة هي كالتالي: + 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point + 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) +3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. +4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` + +إذا لم تكن هناك أخطاء يجب أن يقوم عقدة الغراف بمزامنة الغراف الفرعي المنشور. قم بمنحه بعض الوقت لإتمام عملية المزامنة، ثم قم بإرسال بعض استعلامات لغة الإستعلام للغراف (GraphQL) إلى نقطة نهاية واجهة برمجة التطبيقات الموجودة في السجلات. + +## Substreams-powered Subgraphs + +For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/substreams/sps/introduction/). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
diff --git a/website/pages/ar/indexing/overview.mdx b/website/pages/ar/indexing/overview.mdx new file mode 100644 index 000000000000..060c87662a3c --- /dev/null +++ b/website/pages/ar/indexing/overview.mdx @@ -0,0 +1,819 @@ +--- +title: Indexing +--- + +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. + +GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. + +يختار المفهرسون subgraphs للقيام بالفهرسة بناء على إشارة تنسيق subgraphs ، حيث أن المنسقون يقومون ب staking ل GRT وذلك للإشارة ل Subgraphs عالية الجودة. يمكن أيضا للعملاء (مثل التطبيقات) تعيين بارامترات حيث يقوم المفهرسون بمعالجة الاستعلامات ل Subgraphs وتسعير رسوم الاستعلام. + + + +## الأسئلة الشائعة + +### What is the minimum stake required to be an Indexer on the network? + +The minimum stake for an Indexer is currently set to 100K GRT. + +### What are the revenue streams for an Indexer? + +**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. + +**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. + +### How are indexing rewards distributed? + +Indexing rewards come from protocol inflation which is set to 3% annual issuance. 
They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** + +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. + +### ما هو إثبات الفهرسة (POI)؟ + +POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. + +### متى يتم توزيع مكافآت الفهرسة؟ + +Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). + +### Can pending indexing rewards be monitored? 
+ +The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. + +تشتمل العديد من لوحات المعلومات التي أنشأها المجتمع على قيم المكافآت المعلقة ويمكن التحقق منها بسهولة يدويًا باتباع الخطوات التالية: + +1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: + +```graphql +query indexerAllocations { +  indexer(id: "") { +    allocations { +      activeForIndexer { +        allocations { +          id +        } +      } +    } +  } +} +``` + +استخدم Etherscan لاستدعاء `getRewards()`: + +- انتقل إلى [واجهة Etherscan لعقد المكافآت Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) + +* لاستدعاء `getRewards()`: + - Expand the **9. getRewards** dropdown. + - أدخل **معرّف التخصيص** في الإدخال. + - انقر فوق الزر **الاستعلام**. + +### ما هي الاعتراضات disputes وأين يمكنني عرضها؟ + +يمكن الاعتراض على استعلامات المفهرس وتخصيصاته على The Graph أثناء فترة الاعتراض dispute. تختلف فترة الاعتراض حسب نوع الاعتراض. تحتوي الاستعلامات / الشهادات Queries/attestations على نافذة اعتراض لـ 7 فترات ، في حين أن المخصصات لها 56 فترة. بعد مرور هذه الفترات ، لا يمكن فتح اعتراضات ضد أي من المخصصات أو الاستعلامات. عند فتح الاعتراض ، يجب على الصيادين Fishermen إيداع على الأقل 10000 GRT ، والتي سيتم حجزها حتى يتم الانتهاء من الاعتراض وتقديم حل. الصيادون Fisherman هم المشاركون في الشبكة الذين يفتحون الاعتراضات. + +يمكنك عرض الاعتراضات من واجهة المستخدم في صفحة ملف تعريف المفهرس وذلك من علامة التبويب `Disputes`. + +- إذا تم رفض الاعتراض، فسيتم حرق GRT المودعة من قبل ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. +- إذا تمت تسوية الاعتراض بالتعادل، فسيتم إرجاع وديعة ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. 
+- إذا تم قبول الاعتراض، فسيتم إرجاع GRT التي أودعها الFishermen ، وسيتم شطب المفهرس المعترض عليه وسيكسب Fishermen ال 50٪ من GRT المشطوبة. + +يمكن عرض الاعتراضات في واجهة المستخدم في بروفايل المفهرس ضمن علامة التبويب `Disputes`. + +### ما هي خصومات رسوم الاستعلام ومتى يتم توزيعها؟ + +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. + +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. + +### What is query fee cut and indexing reward cut? + +The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/indexing/overview/#stake-in-the-protocol) for instructions on setting the delegation parameters. + +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. + +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. 
+ +### How do Indexers know which subgraphs to index? + +من خلال تطبيق تقنيات متقدمة لاتخاذ قرارات فهرسة ال subgraph ، وسنناقش العديد من المقاييس الرئيسية المستخدمة لتقييم ال subgraphs في الشبكة: + +- **إشارة التنسيق Curation signal** - تعد نسبة إشارة تنسيق الشبكة على subgraph معين مؤشرا جيدا على الاهتمام بهذا ال subgraph، خاصة أثناء المراحل الأولى عندما يزداد حجم الاستعلام. + +- **مجموعة رسوم الاستعلام Query fees collected** - تعد البيانات التاريخية لحجم مجموعة رسوم الاستعلام ل subgraph معين مؤشرا جيدا للطلب المستقبلي. + +- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. + +- **ال Subgraphs التي بدون مكافآت فهرسة** - بعض الsubgraphs لا تنتج مكافآت الفهرسة بشكل أساسي لأنها تستخدم ميزات غير مدعومة مثل IPFS أو لأنها تستعلم عن شبكة أخرى خارج الشبكة الرئيسية mainnet. سترى رسالة على ال subgraph إذا لا تنتج مكافآت فهرسة. + +### ما هي المتطلبات للهاردوير؟ + +- **صغيرة** - يكفي لبدء فهرسة العديد من ال subgraphs، من المحتمل أن تحتاج إلى توسيع. +- **قياسية** - هو الإعداد الافتراضي ، ويتم استخدامه في مثال بيانات نشر k8s / terraform. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **كبيرة** - مُعدة لفهرسة جميع ال subgraphs المستخدمة حاليا وأيضا لخدمة طلبات حركة مرور البيانات ذات الصلة. + +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| --- | :-: | :-: | :-: | :-: | :-: | +| صغير | 4 | 8 | 1 | 4 | 16 | +| قياسي | 8 | 30 | 1 | 12 | 48 | +| متوسط | 16 | 64 | 2 | 32 | 64 | +| كبير | 72 | 468 | 3.5 | 48 | 184 | + +### What are some basic security precautions an Indexer should take? + +- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/indexing/overview/#stake-in-the-protocol) for instructions. + +- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. + +## البنية الأساسية + +At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. + +- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. + +- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. + +- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. + +- **خدمة المفهرس Indexer service** - يتعامل مع جميع الاتصالات الخارجية المطلوبة مع الشبكة. ويشارك نماذج التكلفة وحالات الفهرسة ، ويمرر طلبات الاستعلام من البوابات gateways إلى Graph Node ، ويدير مدفوعات الاستعلام عبر قنوات الحالة مع البوابة. + +- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. + +- **Prometheus metrics server** - مكونات The Graph Node والمفهرس يسجلون مقاييسهم على سيرفر المقاييس. + +ملاحظة: لدعم القياس السريع ، يستحسن فصل الاستعلام والفهرسة بين مجموعات مختلفة من العقد Nodes: عقد الاستعلام وعقد الفهرس. + +### نظرة عامة على المنافذ Ports + +> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC and the Indexer management endpoints detailed below. + +#### Graph Node + +| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | +| --- | --- | --- | --- | --- | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...

/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...

/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | +| 8040 | Prometheus metrics | /metrics | --metrics-port | - | + +#### خدمة المفهرس + +| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | +| --- | --- | --- | --- | --- | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | --metrics-port | - | + +#### وكيل المفهرس (Indexer Agent) + +| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | +| ------ | ----------------- | ------ | ------------------------- | --------------------------------------- | +| 8000 | API إدارة المفهرس | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | + +### قم بإعداد البنية الأساسية للسيرفر باستخدام Terraform على جوجل كلاود + +> ملاحظة: يمكن للمفهرسين كبديل استخدام خدمات أمازون ويب، أو مايكروسوفت أزور، أو علي بابا. + +#### متطلبات التثبيت + +- Google Cloud SDK +- أداة سطر أوامر Kubectl +- Terraform + +#### أنشئ مشروع Google Cloud + +- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). + +- Navigate to the `./terraform` directory, this is where all commands should be executed. + +```sh +cd terraform +``` + +- قم بالتوثيق بواسطة Google Cloud وأنشئ مشروع جديد. + +```sh +gcloud auth login +project= +gcloud projects create --enable-cloud-apis $project +``` + +- استخدم [صفحة الفوترة](https://console.cloud.google.com/billing) في Google Cloud Console لتمكين الفوترة للمشروع الجديد. + +- قم بإنشاء Google Cloud configuration. + +```sh +proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") +gcloud config configurations create $project +gcloud config set project "$proj_id" +gcloud config set compute/region us-central1 +gcloud config set compute/zone us-central1-a +``` + +- قم بتفعيل Google Cloud APIs المطلوبة. + +```sh +gcloud services enable compute.googleapis.com +gcloud services enable container.googleapis.com +gcloud services enable servicenetworking.googleapis.com +gcloud services enable sqladmin.googleapis.com +``` + +- قم بإنشاء حساب الخدمة. 
+ +```sh +svc_name= +gcloud iam service-accounts create $svc_name \ + --description="Service account for Terraform" \ + --display-name="$svc_name" +gcloud iam service-accounts list +# Get the email of the service account from the list +svc=$(gcloud iam service-accounts list --format='get(email)' +--filter="displayName=$svc_name") +gcloud iam service-accounts keys create .gcloud-credentials.json \ + --iam-account="$svc" +gcloud projects add-iam-policy-binding $proj_id \ + --member serviceAccount:$svc \ + --role roles/editor +``` + +- قم بتفعيل ال peering بين قاعدة البيانات ومجموعة Kubernetes التي سيتم إنشاؤها في الخطوة التالية. + +```sh +gcloud compute addresses create google-managed-services-default \ + --prefix-length=20 \ + --purpose=VPC_PEERING \ + --network default \ + --global \ + --description 'IP Range for peer networks.' +gcloud services vpc-peerings connect \ + --network=default \ + --ranges=google-managed-services-default +``` + +- قم بإنشاء الحد الأدنى من ملف التهيئة ل terraform (التحديث حسب الحاجة). + +```sh +indexer= +cat > terraform.tfvars < \ + -f Dockerfile.indexer-service \ + -t indexer-service:latest \ +# Indexer agent +docker build \ + --build-arg NPM_TOKEN= \ + -f Dockerfile.indexer-agent \ + -t indexer-agent:latest \ +``` + +- قم بتشغيل المكونات + +```sh +docker run -p 7600:7600 -it indexer-service:latest ... +docker run -p 18000:8000 -it indexer-agent:latest ... +``` + +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
+ +#### استخدام K8s و Terraform + +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/indexing/overview/#setup-server-infrastructure-using-terraform-on-google-cloud) section + +#### الاستخدام + +> **ملاحظة**: جميع متغيرات الإعدادات الخاصة بوقت التشغيل يمكن تطبيقها إما كبارامترات للأمر عند بدء التشغيل أو باستخدام متغيرات البيئة بالتنسيق `COMPONENT_NAME_VARIABLE_NAME` (على سبيل المثال `INDEXER_AGENT_ETHEREUM`). + +#### وكيل المفهرس(Indexer Agent) + +```sh +graph-indexer-agent start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --graph-node-admin-endpoint http://localhost:8020/ \ + --public-indexer-url http://localhost:7600/ \ + --indexer-geo-coordinates \ + --index-node-ids default \ + --indexer-management-port 18000 \ + --metrics-port 7040 \ + --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ + --default-allocation-amount 100 \ + --register true \ + --inject-dai true \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database indexer \ + --allocation-management auto \ + | pino-pretty +``` + +#### خدمة المفهرس Indexer service + +```sh +SERVER_HOST=localhost \ +SERVER_PORT=5432 \ +SERVER_DB_NAME=is_staging \ +SERVER_DB_USER= \ +SERVER_DB_PASSWORD= \ +graph-indexer-service start \ + --ethereum \ + --ethereum-network mainnet \ + --mnemonic \ + --indexer-address \ + --port 7600 \ + --metrics-port 7300 \ + --graph-node-query-endpoint http://localhost:8000/ \ + --graph-node-status-endpoint http://localhost:8030/graphql \ + --postgres-host localhost \ + --postgres-port 5432 \ + --postgres-username \ + --postgres-password \ + --postgres-database is_staging \ + --network-subgraph-endpoint 
http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ + | pino-pretty +``` + +#### CLI المفهرس + +CLI المفهرس هو مكون إضافي لـ [`graphprotocol/graph-cli@`](https://www.npmjs.com/package/@graphprotocol/graph-cli) يمكن الوصول إليه عند `graph indexer`. + +```sh +graph indexer connect http://localhost:18000 +graph indexer status +``` + +#### Indexer management using Indexer CLI + +The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. + +#### الاستخدام + +The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. + +- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) + +- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. 
An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. + +- `graph indexer rules set [options] ...` - قم بتعيين قاعدة أو أكثر من قواعد الفهرسة. + +- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. + +- `graph indexer rules stop [options] ` - توقف عن فهرسة النشر deployment وقم بتعيين ملف `decisionBasis` إلى `never` أبدًا ، لذلك سيتم تخطي هذا النشر عند اتخاذ قرار بشأن عمليات النشر للفهرسة. + +- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. + +- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. + +- `graph indexer action queue allocate ` - Queue allocation action + +- `graph indexer action queue reallocate ` - Queue reallocate action + +- `graph indexer action queue unallocate ` - Queue unallocate action + +- `graph indexer actions cancel [ ...]` - Cancel all actions in the queue if id is unspecified, otherwise cancel an array of ids with space as separator + +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution + +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately + +جميع الأوامر التي تعرض القواعد في الخرج output يمكنها الاختيار بين تنسيقات الإخراج المدعومة (`table`, `yaml`, `json`) باستخدام `--output` argument. + +#### قواعد الفهرسة + +Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. 
The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. + +على سبيل المثال ، إذا كانت القاعدة العامة لديها`minStake` من ** 5 ** (GRT) ، فأي نشر subgraph به أكثر من 5 (GRT) من الحصة المخصصة ستتم فهرستها. قواعد العتبة تتضمن `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, `minAverageQueryFees`. + +نموذج البيانات Data model: + +```graphql +type IndexingRule { + identifier: string + identifierType: IdentifierType + decisionBasis: IndexingDecisionBasis! + allocationAmount: number | null + allocationLifetime: number | null + autoRenewal: boolean + parallelAllocations: number | null + maxAllocationPercentage: number | null + minSignal: string | null + maxSignal: string | null + minStake: string | null + minAverageQueryFees: string | null + custom: string | null + requireSupported: boolean | null + } + +IdentifierType { + deployment + subgraph + group +} + +IndexingDecisionBasis { + rules + never + always + offchain +} +``` + +Example usage of indexing rule: + +``` +graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules set QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK decisionBasis always allocationAmount 123321 allocationLifetime 14 autoRenewal false requireSupported false + +graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK + +graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK +``` + +#### Actions queue CLI + +The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. 
+ +The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: + +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions +- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. +- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. +- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. +- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
+ +نموذج البيانات Data model: + +```graphql +Type ActionInput { + status: ActionStatus + type: ActionType + deploymentID: string | null + allocationID: string | null + amount: string | null + poi: string | null + force: boolean | null + source: string + reason: string | null + priority: number | null +} + +ActionStatus { + queued + approved + pending + success + failed + canceled +} + +ActionType { + allocate + unallocate + reallocate + collect +} +``` + +Example usage from source: + +```bash +graph indexer actions get all + +graph indexer actions get --status queued + +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 + +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 + +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae + +graph indexer actions cancel + +graph indexer actions approve 1 3 5 + +graph indexer actions execute approve +``` + +Note that supported action types for allocation management have different input requirements: + +- `Allocate` - allocate stake to a specific subgraph deployment + + - required action params: + - deploymentID + - amount + +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere + + - required action params: + - allocationID + - deploymentID + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment + + - required action params: + - allocationID + - deploymentID + - amount + - optional action params: + - poi + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + +#### نماذج التكلفة Cost models + +Cost models provide dynamic pricing for queries based on market and query attributes. 
The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. + +#### Agora + +توفر لغة Agora تنسيقا مرنا للإعلان عن نماذج التكلفة للاستعلامات. نموذج سعر Agora هو سلسلة من العبارات التي يتم تنفيذها بالترتيب لكل استعلام عالي المستوى في GraphQL. بالنسبة إلى كل استعلام عالي المستوى top-level ، فإن العبارة الأولى التي تتطابق معه تحدد سعر هذا الاستعلام. + +تتكون العبارة من المسند predicate ، والذي يستخدم لمطابقة استعلامات GraphQL وتعبير التكلفة والتي عند تقييم النواتج تكون التكلفة ب GRT عشري. قيم الاستعلام الموجودة في ال argument ،قد يتم تسجيلها في المسند predicate واستخدامها في التعبير expression. يمكن أيضًا تعيين Globals وتعويضه في التعبير expression. + +مثال لتكلفة الاستعلام باستخدام النموذج أعلاه: + +``` +# This statement captures the skip value, +# uses a boolean expression in the predicate to match specific queries that use `skip` +# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; + +# This default will match any GraphQL expression. +# It uses a Global substituted into the expression to calculate cost +default => 0.1 * $SYSTEM_LOAD; +``` + +مثال على نموذج التكلفة: + +| الاستعلام | السعر | +| ---------------------------------------------------------------------------- | ------- | +| { pairs(skip: 5000) { id } } | 0.5 GRT | +| { tokens { symbol } } | 0.1 GRT | +| { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | + +#### تطبيق نموذج التكلفة + +Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. 
+ +```sh +indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' +indexer cost set model my_model.agora +``` + +## التفاعل مع الشبكة + +### Stake in the protocol + +The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. + +> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). + +Once an Indexer has staked GRT in the protocol, the [Indexer components](/indexing/overview/#indexer-components) can be started up and begin their interactions with the network. + +#### اعتماد التوكن tokens + +1. افتح [تطبيق Remix](https://remix.ethereum.org/) على المتصفح + +2. في `File Explorer` أنشئ ملفا باسم **GraphToken.abi** باستخدام [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). + +3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. قم بتعيين عنوان GraphToken - الصق العنوان (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) بجوار `At Address` وانقر على الزر `At address` لتطبيق ذلك. + +6. استدعي دالة `approve(spender, amount)` للموافقة على عقد Staking. املأ `spender` بعنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) واملأ `amount` بالتوكن المراد عمل staking لها (في wei). + +#### Stake tokens + +1. افتح [تطبيق Remix](https://remix.ethereum.org/) على المتصفح + +2. في `File Explorer` أنشئ ملفا باسم **Staking.abi** باستخدام Staking ABI. + +3. 
With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. + +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. + +5. عيّن عنوان عقد Staking - الصق عنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) بجوار `At address` وانقر على الزر `At address` لتطبيق ذلك. + +6. استدعي `stake()` لوضع GRT في البروتوكول. + +7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. + +8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCut to distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set the `cooldownBlocks` period to 500 blocks. + +``` +setDelegationParameters(950000, 600000, 500) +``` + +### Setting delegation parameters + +The `setDelegationParameters()` function in the [staking contract](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) is essential for Indexers, allowing them to set parameters that define their interactions with Delegators, influencing their reward sharing and delegation capacity. + +### How to set delegation parameters + +To set the delegation parameters using Graph Explorer interface, follow these steps: + +1. Navigate to [Graph Explorer](https://thegraph.com/explorer/). +2. 
Connect your wallet. Choose multisig (such as Gnosis Safe) and then select mainnet. Note: You will need to repeat this process for Arbitrum One. +3. Connect the wallet you have as a signer. +4. Navigate to the 'Settings' section and select 'Delegation Parameters'. These parameters should be configured to achieve an effective cut within the desired range. Upon entering values in the provided input fields, the interface will automatically calculate the effective cut. Adjust these values as necessary to attain the desired effective cut percentage. +5. Submit the transaction to the network. + +> Note: This transaction will need to be confirmed by the multisig wallet signers. + +### عمر التخصيص allocation + +After being created by an Indexer a healthy allocation goes through two states. + +- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. + +- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/indexing/overview/#how-are-indexing-rewards-distributed)). + +Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. 
This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/ar/supported-network-requirements.mdx b/website/pages/ar/indexing/supported-network-requirements.mdx similarity index 100% rename from website/pages/ar/supported-network-requirements.mdx rename to website/pages/ar/indexing/supported-network-requirements.mdx diff --git a/website/pages/ar/indexing/tap.mdx b/website/pages/ar/indexing/tap.mdx new file mode 100644 index 000000000000..cc523ded6384 --- /dev/null +++ b/website/pages/ar/indexing/tap.mdx @@ -0,0 +1,193 @@ +--- +title: TAP Migration Guide +--- + +Learn about The Graph’s new payment system, **Timeline Aggregation Protocol, TAP**. This system provides fast, efficient microtransactions with minimized trust. + +## نظره عامة + +[TAP](https://docs.rs/tap_core/latest/tap_core/index.html) is a drop-in replacement to the Scalar payment system currently in place. It provides the following key features: + +- Efficiently handles micropayments. +- Adds a layer of consolidations to on-chain transactions and costs. +- Allows Indexers control of receipts and payments, guaranteeing payment for queries. +- It enables decentralized, trustless gateways and improves `indexer-service` performance for multiple senders. + +## Specifics + +TAP allows a sender to make multiple payments to a receiver, **TAP Receipts**, which aggregates these payments into a single payment, a **Receipt Aggregate Voucher**, also known as a **RAV**. This aggregated payment can then be verified on the blockchain, reducing the number of transactions and simplifying the payment process. + +For each query, the gateway will send you a `signed receipt` that is stored on your database. Then, these queries will be aggregated by a `tap-agent` through a request. Afterwards, you’ll receive a RAV. 
You can update a RAV by sending it with newer receipts and this will generate a new RAV with an increased value. + +### RAV Details + +- It’s money that is waiting to be sent to the blockchain. + +- It will continue to send requests to aggregate and ensure that the total value of non-aggregated receipts does not exceed the `amount willing to lose`. + +- Each RAV can be redeemed once in the contracts, which is why they are sent after the allocation is closed. + +### Redeeming RAV + +As long as you run `tap-agent` and `indexer-agent`, everything will be executed automatically. The following provides a detailed breakdown of the process: + +1. An Indexer closes allocation. + +2. During the `` period, `tap-agent` takes all pending receipts for that specific allocation and requests an aggregation into a RAV, marking it as `last`. + +3. `indexer-agent` takes all the last RAVs and sends redeem requests to the blockchain, which will update the value of `redeem_at`. + +4. During the `` period, `indexer-agent` monitors if the blockchain has any reorganizations that revert the transaction. + + - If it was reverted, the RAV is resent to the blockchain. If it was not reverted, it gets marked as `final`. 
+ +## Blockchain Addresses + +### Contracts + +| Contract | Arbitrum Mainnet (42161) | Arbitrum Sepolia (421614) | +| ------------------- | -------------------------------------------- | -------------------------------------------- | +| TAP Verifier | `0x33f9E93266ce0E108fc85DdE2f71dab555A0F05a` | `0xfC24cE7a4428A6B89B52645243662A02BA734ECF` | +| AllocationIDTracker | `0x5B2F33d7Ca6Ec88f5586f2528f58c20843D9FE7c` | `0xAaC28a10d707bbc6e02029f1bfDAEB5084b2aD11` | +| Escrow | `0x8f477709eF277d4A880801D01A140a9CF88bA0d3` | `0x1e4dC4f9F95E102635D8F7ED71c5CdbFa20e2d02` | + +### Gateway + +| Component | Edge and Node Mainnet (Aribtrum Mainnet) | Edge and Node Testnet (Arbitrum Sepolia) | +| ---------- | --------------------------------------------- | --------------------------------------------- | +| Sender | `0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467` | `0xC3dDf37906724732FfD748057FEBe23379b0710D` | +| Signers | `0xfF4B7A5EfD00Ff2EC3518D4F250A27e4c29A2211` | `0xFb142dE83E261e43a81e9ACEADd1c66A0DB121FE` | +| Aggregator | `https://tap-aggregator.network.thegraph.com` | `https://tap-aggregator.testnet.thegraph.com` | + +### Requirements + +In addition to the typical requirements to run an indexer, you’ll need a `tap-escrow-subgraph` endpoint to query TAP updates. You can use The Graph Network to query or host yourself on your `graph-node`. + +- [Graph TAP Aribtrum Sepolia subgraph (for The Graph testnet)](https://thegraph.com/explorer/subgraphs/7ubx365MiqBH5iUz6XWXWT8PTof5BVAyEzdb8m17RvbD) +- [Graph TAP Arbitrum One subgraph (for The Graph mainnet)](https://thegraph.com/explorer/subgraphs/4sukbNVTzGELnhdnpyPqsf1QqtzNHEYKKmJkgaT8z6M1) + +> Note: `indexer-agent` does not currently handle the indexing of this subgraph like it does for the network subgraph deployement. As a result, you have to index it manually. 
+ +## Migration Guide + +### Software versions + +The required software version can be found [here](https://github.com/graphprotocol/indexer/blob/main/docs/networks/arbitrum-one.md#latest-releases). + +### Steps + +1. **Indexer Agent** + + - Follow the [same process](https://github.com/graphprotocol/indexer/pkgs/container/indexer-agent#graph-protocol-indexer-components). + - Give the new argument `--tap-subgraph-endpoint` to activate the new TAP codepaths and enable redeeming of TAP RAVs. + +2. **Indexer Service** + + - Fully replace your current configuration with the [new Indexer Service rs](https://github.com/graphprotocol/indexer-rs). It's recommended that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + - Like the older version, you can scale Indexer Service horizontally easily. It is still stateless. + +3. **TAP Agent** + + - Run _one_ single instance of [TAP Agent](https://github.com/graphprotocol/indexer-rs) at all times. It's recommended that you use the [container image](https://github.com/orgs/graphprotocol/packages?repo_name=indexer-rs). + +4. **Configure Indexer Service and TAP Agent** + + Configuration is a TOML file shared between `indexer-service` and `tap-agent`, supplied with the argument `--config /path/to/config.toml`. + + Check out the full [configuration](https://github.com/graphprotocol/indexer-rs/blob/main/config/maximal-config-example.toml) and the [default values](https://github.com/graphprotocol/indexer-rs/blob/main/config/default_values.toml) + +For minimal configuration, use the following template: + +```bash +# You will have to change *all* the values below to match your setup. +# +# Some of the config below are global graph network values, which you can find here: +# +# +# Pro tip: if you need to load some values from the environment into this config, you +# can overwrite with environment variables. 
For example, the following can be replaced +# by [PREFIX]_DATABASE_POSTGRESURL, where PREFIX can be `INDEXER_SERVICE` or `TAP_AGENT`: +# +# [database] +# postgres_url = "postgresql://indexer:${POSTGRES_PASSWORD}@postgres:5432/indexer_components_0" + +[indexer] +indexer_address = "0x1111111111111111111111111111111111111111" +operator_mnemonic = "celery smart tip orange scare van steel radio dragon joy alarm crane" + +[database] +# The URL of the Postgres database used for the indexer components. The same database +# that is used by the `indexer-agent`. It is expected that `indexer-agent` will create +# the necessary tables. +postgres_url = "postgres://postgres@postgres:5432/postgres" + +[graph_node] +# URL to your graph-node's query endpoint +query_url = "" +# URL to your graph-node's status endpoint +status_url = "" + +[subgraphs.network] +# Query URL for the Graph Network subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + +[subgraphs.escrow] +# Query URL for the Escrow subgraph. +query_url = "" +# Optional, deployment to look for in the local `graph-node`, if locally indexed. +# Locally indexing the subgraph is recommended. +# NOTE: Use `query_url` or `deployment_id` only +deployment_id = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + +[blockchain] +# The chain ID of the network that the graph network is running on +chain_id = 1337 +# Contract address of TAP's receipt aggregate voucher (RAV) verifier. +receipts_verifier_address = "0x2222222222222222222222222222222222222222" + +######################################## +# Specific configurations to tap-agent # +######################################## +[tap] +# This is the amount of fees you are willing to risk at any given time. For ex. 
+ +# if the sender stops supplying RAVs for long enough and the fees exceed this +# amount, the indexer-service will stop accepting queries from the sender +# until the fees are aggregated. +# NOTE: Use strings for decimal values to prevent rounding errors +# e.g: +# max_amount_willing_to_lose_grt = "0.1" +max_amount_willing_to_lose_grt = 20 + +[tap.sender_aggregator_endpoints] +# Key-Value of all senders and their aggregator endpoints +# This one below is for the E&N testnet gateway for example. +0xDDE4cfFd3D9052A9cb618fC05a1Cd02be1f2F467 = "https://tap-aggregator.network.thegraph.com" +``` + +Notes: + +- Values for `tap.sender_aggregator_endpoints` can be found in the [gateway section](/indexing/tap/#gateway). +- Values for `blockchain.receipts_verifier_address` must be used according to the [Blockchain addresses section](/indexing/tap/#contracts) using the appropriate chain id. + +**Log Level** + +- You can set the log level by using the `RUST_LOG` environment variable. +- It’s recommended that you set it to `RUST_LOG=indexer_tap_agent=debug,info`. + +## Monitoring + +### Metrics + +All components expose the port 7300 to be queried by Prometheus. + +### Grafana Dashboard + +You can download the [Grafana Dashboard](https://github.com/graphprotocol/indexer-rs/blob/main/docs/dashboard.json) and import it. 
+ +### Launchpad + +Currently, there is a WIP version of `indexer-rs` and `tap-agent` that can be found [here](https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer) diff --git a/website/pages/ar/indexing/tooling/_meta.js b/website/pages/ar/indexing/tooling/_meta.js new file mode 100644 index 000000000000..d644d6025b92 --- /dev/null +++ b/website/pages/ar/indexing/tooling/_meta.js @@ -0,0 +1,5 @@ +import meta from '../../../en/indexing/tooling/_meta.js' + +export default { + ...meta, +} diff --git a/website/pages/ar/firehose.mdx b/website/pages/ar/indexing/tooling/firehose.mdx similarity index 100% rename from website/pages/ar/firehose.mdx rename to website/pages/ar/indexing/tooling/firehose.mdx diff --git a/website/pages/ar/operating-graph-node.mdx b/website/pages/ar/indexing/tooling/graph-node.mdx similarity index 100% rename from website/pages/ar/operating-graph-node.mdx rename to website/pages/ar/indexing/tooling/graph-node.mdx diff --git a/website/pages/ar/graphcast.mdx b/website/pages/ar/indexing/tooling/graphcast.mdx similarity index 100% rename from website/pages/ar/graphcast.mdx rename to website/pages/ar/indexing/tooling/graphcast.mdx diff --git a/website/pages/ar/managing/_meta.js b/website/pages/ar/managing/_meta.js deleted file mode 100644 index a7c7b3d79464..000000000000 --- a/website/pages/ar/managing/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/managing/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/network/_meta.js b/website/pages/ar/network/_meta.js deleted file mode 100644 index 49858537c885..000000000000 --- a/website/pages/ar/network/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/network/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/network/benefits.mdx b/website/pages/ar/network/benefits.mdx deleted file mode 100644 index 3bdd3f1e6e25..000000000000 --- a/website/pages/ar/network/benefits.mdx +++ /dev/null @@ -1,92 +0,0 
@@ ---- -title: The Graph Network vs. Self Hosting -socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ---- - -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. - -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. - -Here is an analysis: - -## Why You Should Use The Graph Network - -- Significantly lower monthly costs -- $0 infrastructure setup costs -- Superior uptime -- Access to hundreds of independent Indexers around the world -- 24/7 technical support by global community - -## The Benefits Explained - -### Lower & more Flexible Cost Structure - -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $40 per million queries (~$0.00004 per query). Queries are priced in USD and paid in GRT or credit card. - -Query costs may vary; the quoted cost is the average at time of publication (March 2024). 
- -## Low Volume User (less than 100,000 queries per month) - -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | $0 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 100,000 (Free Plan) | -| Cost per query | $0 | $0 | -| البنية الأساسية | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | $0 | - -## Medium Volume User (~3M queries per month) - -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $120 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~3,000,000 | -| Cost per query | $0 | $0.00004 | -| البنية الأساسية | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $120 | - -## High Volume User (~30M queries per month) - -| Cost Comparison | Self Hosted | The Graph Network | -| :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $1,200 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | ~30,000,000 | -| Cost per query | $0 | $0.00004 | -| البنية الأساسية | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly 
Costs | $11,000+ | $1,200 | - -\*including costs for backup: $50-$100 per month - -Engineering time based on $200 per hour assumption - -Reflects cost for data consumer. Query fees are still paid to Indexers for Free Plan queries. - -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. - -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). - -## No Setup Costs & Greater Operational Efficiency - -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. - -## Reliability & Resiliency - -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. - -Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. - -Start using The Graph Network today, and learn how to [publish your subgraph to The Graph's decentralized network](/quick-start). 
diff --git a/website/pages/ar/network/contracts.mdx b/website/pages/ar/network/contracts.mdx deleted file mode 100644 index 6abd80577ced..000000000000 --- a/website/pages/ar/network/contracts.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Protocol Contracts ---- - -import { ProtocolContractsTable } from '@/src/contracts' - -Below are the deployed contracts which power The Graph Network. Visit the official [contracts repository](https://github.com/graphprotocol/contracts) to learn more. - -## Arbitrum - -This is the principal deployment of The Graph Network. - - - -## Mainnet - -This was the original deployment of The Graph Network. [Learn more](/arbitrum/arbitrum-faq) about The Graph's scaling with Arbitrum. - - - -## Arbitrum Sepolia - -This is the primary testnet for The Graph Network. Testnet is predominantly used by core developers and ecosystem participants for testing purposes. There are no guarantees of service or availability on The Graph's testnets. - - - -## Sepolia - - diff --git a/website/pages/ar/network/curating.mdx b/website/pages/ar/network/curating.mdx deleted file mode 100644 index 970b6fbbc405..000000000000 --- a/website/pages/ar/network/curating.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Curating ---- - -Curators are critical to The Graph's decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through Graph Explorer, Curators view network data to make signaling decisions. In turn, The Graph Network rewards Curators who signal on good quality subgraphs with a share of the query fees those subgraphs generate. The amount of GRT signaled is one of the key considerations for indexers when determining which subgraphs to index. - -## What Does Signaling Mean for The Graph Network? - -Before consumers can query a subgraph, it must be indexed. This is where curation comes into play. 
In order for Indexers to earn substantial query fees on quality subgraphs, they need to know what subgraphs to index. When Curators signal on a subgraph, it lets Indexers know that a subgraph is in demand and of sufficient quality that it should be indexed. - -Curators make The Graph network efficient and [signaling](#how-to-signal) is the process that Curators use to let Indexers know that a subgraph is good to index. Indexers can trust the signal from a Curator because upon signaling, Curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. - -Curator signals are represented as ERC20 tokens called Graph Curation Shares (GCS). Those that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators will also earn fewer query fees if they curate on a low-quality subgraph because there will be fewer queries to process or fewer Indexers to process them. - -The [Sunrise Upgrade Indexer](/sunrise/#what-is-the-upgrade-indexer) ensures the indexing of all subgraphs, signaling GRT on a particular subgraph will draw more indexers to it. This incentivization of additional Indexers through curation aims to enhance the quality of service for queries by reducing latency and enhancing network availability. - -When signaling, Curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. If they signal using auto-migrate, a curator’s shares will always be updated to the latest version published by the developer. If they decide to signal on a specific version instead, shares will always stay on this specific version. 
- -If you require assistance with curation to enhance the quality of service, please send a request to the Edge & Node team at support@thegraph.zendesk.com and specify the subgraphs you need assistance with. - -Indexers can find subgraphs to index based on curation signals they see in Graph Explorer (screenshot below). - -![مستكشف الفرعيةرسم بياني](/img/explorer-subgraphs.png) - -## كيفية الإشارة - -Within the Curator tab in Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in Graph Explorer, [click here.](/network/explorer) - -يمكن للمنسق الإشارة إلى إصدار معين ل subgraph ، أو يمكنه اختيار أن يتم ترحيل migrate إشاراتهم تلقائيا إلى أحدث إصدار لهذا ال subgraph. كلاهما استراتيجيات سليمة ولها إيجابيات وسلبيات. - -Signaling on a specific version is especially useful when one subgraph is used by multiple dapps. One dapp might need to regularly update the subgraph with new features. Another dapp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. - -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. - -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, and also transfers tokens into The Graph proxy. - -## Withdrawing your GRT - -Curators have the option to withdraw their signaled GRT at any time. 
- -Unlike the process of delegating, if you decide to withdraw your signaled GRT you will not have to wait for a cooldown period and will receive the entire amount (minus the 1% curation tax). - -Once a curator withdraws their signal, indexers may choose to keep indexing the subgraph, even if there's currently no active GRT signaled. - -However, it is recommended that curators leave their signaled GRT in place not only to receive a portion of the query fees, but also to ensure reliability and uptime of the subgraph. - -## المخاطر - -1. سوق الاستعلام يعتبر حديثا في The Graph وهناك خطر من أن يكون٪ APY الخاص بك أقل مما تتوقع بسبب ديناميكيات السوق الناشئة. -2. Curation Fee - when a curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned. -3. (Ethereum only) When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dapp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. يمكن أن يفشل ال subgraph بسبب خطأ. ال subgraph الفاشل لا يمكنه إنشاء رسوم استعلام. نتيجة لذلك ، سيتعين عليك الانتظار حتى يصلح المطور الخطأ وينشر إصدارا جديدا. - - إذا كنت مشتركا في أحدث إصدار من subgraph ، فسيتم ترحيل migrate أسهمك تلقائيا إلى هذا الإصدار الجديد. هذا سيتحمل ضريبة تنسيق بنسبة 0.5٪. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. You can then signal on the new subgraph version, thus incurring a 1% curation tax. - -## الأسئلة الشائعة حول التنسيق - -### 1. 
ما هي النسبة المئوية لرسوم الاستعلام التي يكسبها المنسقون؟ - -By signalling on a subgraph, you will earn a share of all the query fees that the subgraph generates. 10% of all query fees go to the Curators pro-rata to their curation shares. This 10% is subject to governance. - -### 2. كيف يمكنني تقرير ما إذا كان ال subgraph عالي الجودة لكي أقوم بالإشارة إليه؟ - -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dapp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: - -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. - -### 3. What’s the cost of updating a subgraph? - -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curator shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because upgrading subgraphs is an on-chain action that costs gas. - -### 4. How often can I update my subgraph? - -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. - -### 5. هل يمكنني بيع أسهم التنسيق الخاصة بي؟ - -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. 
They can only be minted (created) or burned (destroyed). - -As a Curator on Arbitrum, you are guaranteed to get back the GRT you initially deposited (minus the tax). - -### 6. Am I eligible for a curation grant? - -Curation grants are determined individually on a case-by-case basis. If you need assistance with curation, please send a request to support@thegraph.zendesk.com. - -لازلت مشوشا؟ راجع فيديو دليل التنسيق أدناه: - - diff --git a/website/pages/ar/network/developing.mdx b/website/pages/ar/network/developing.mdx deleted file mode 100644 index 6f456be01c17..000000000000 --- a/website/pages/ar/network/developing.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Developing ---- - -To start coding right away, go to [Developer Quick Start](/quick-start/). - -## نظره عامة - -As a developer, you need data to build and power your dapp. Querying and indexing blockchain data is challenging, but The Graph provides a solution to this issue. - -On The Graph, you can: - -1. Create, deploy, and publish subgraphs to The Graph using Graph CLI and [Subgraph Studio](https://thegraph.com/studio/). -2. Use GraphQL to query existing subgraphs. - -### What is GraphQL? - -- [GraphQL](https://graphql.org/learn/) is the query language for APIs and a runtime for executing those queries with your existing data. The Graph uses GraphQL to query subgraphs. - -### Developer Actions - -- Query subgraphs built by other developers in [The Graph Network](https://thegraph.com/explorer) and integrate them into your own dapps. -- Create custom subgraphs to fulfill specific data needs, allowing improved scalability and flexibility for other developers. -- Deploy, publish and signal your subgraphs within The Graph Network. - -### What are subgraphs? - -A subgraph is a custom API built on blockchain data. It extracts data from a blockchain, processes it, and stores it so that it can be easily queried via GraphQL. - -Check out the documentation on [subgraphs](/subgraphs/) to learn specifics. 
diff --git a/website/pages/ar/network/explorer.mdx b/website/pages/ar/network/explorer.mdx deleted file mode 100644 index 2024b24bcd1c..000000000000 --- a/website/pages/ar/network/explorer.mdx +++ /dev/null @@ -1,236 +0,0 @@ ---- -title: Graph Explorer ---- - -Learn about The Graph Explorer and access the world of subgraphs and network data. - -Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. - -## Video Guide - -For a general overview of Graph Explorer, check out the video below: - - - -## Subgraphs - -After you just finish deploying and publishing your subgraph in Subgraph Studio, click on the "subgraphs tab” at the top of the navigation bar to access the following: - -- Your own finished subgraphs -- Subgraphs published by others -- The exact subgraph you want (based on the date created, signal amount, or name). - -![صورة المستكشف 1](/img/Subgraphs-Explorer-Landing.png) - -When you click into a subgraph, you will be able to do the following: - -- Test queries in the playground and be able to leverage network details to make informed decisions. -- Signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. -- This is critical because signaling on a subgraph incentivizes it to be indexed, meaning it’ll eventually surface on the network to serve queries. 
- -![صورة المستكشف 2](/img/Subgraph-Details.png) - -On each subgraph’s dedicated page, you can do the following: - -- أشر/الغي الإشارة على Subgraphs -- اعرض المزيد من التفاصيل مثل المخططات و ال ID الحالي وبيانات التعريف الأخرى -- بدّل بين الإصدارات وذلك لاستكشاف التكرارات السابقة ل subgraphs -- استعلم عن subgraphs عن طريق GraphQL -- اختبار subgraphs في playground -- اعرض المفهرسين الذين يفهرسون Subgraphs معين -- إحصائيات subgraphs (المخصصات ، المنسقين ، إلخ) -- اعرض من قام بنشر ال Subgraphs - -![صورة المستكشف 3](/img/Explorer-Signal-Unsignal.png) - -## المشاركون - -This section provides a bird' s-eye view of all "participants," which includes everyone participating in the network, such as Indexers, Delegators, and Curators. - -### 1. Indexers - -![صورة المستكشف 4](/img/Indexer-Pane.png) - -Indexers are the backbone of the protocol. They stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. - -In the Indexers table, you can see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made from query fees and indexing rewards. - -**Specifics** - -- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators. -- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. If it’s positive, it means that the Indexer is keeping some of their rewards. -- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters. Cooldown periods are set up by Indexers when they update their delegation parameters. -- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior. -- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed. -- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing. 
-- Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated. -- Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. An excess delegated stake cannot be used for allocations or rewards calculations. -- Query Fees - this is the total fees that end users have paid for queries from an Indexer over all time. -- مكافآت المفهرس Indexer Rewards - هو مجموع مكافآت المفهرس التي حصل عليها المفهرس ومفوضيهم Delegators. تدفع مكافآت المفهرس ب GRT. - -Indexers can earn both query fees and indexing rewards. Functionally, this happens when network participants delegate GRT to an Indexer. This enables Indexers to receive query fees and rewards depending on their Indexer parameters. - -- Indexing parameters can be set by clicking on the right-hand side of the table or by going into an Indexer’s profile and clicking the “Delegate” button. - -To learn more about how to become an Indexer, you can take a look at the [official documentation](/network/indexing) or [The Graph Academy Indexer guides.](https://thegraph.academy/delegators/choosing-indexers/) - -![نافذة تفاصيل الفهرسة](/img/Indexing-Details-Pane.png) - -### 3. المفوضون Delegators - -Curators analyze subgraphs to identify which subgraphs are of the highest quality. Once a Curator has found a potentially high-quality subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. - -- Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. - - By depositing GRT, Curators mint curation shares of a subgraph. As a result, they can earn a portion of the query fees generated by the subgraph they have signaled on. - - The bonding curve incentivizes Curators to curate the highest quality data sources. 
- -In the The Curator table listed below you can see: - -- التاريخ الذي بدأ فيه المنسق بالتنسق -- عدد ال GRT الذي تم إيداعه -- عدد الأسهم التي يمتلكها المنسق - -![صورة المستكشف 6](/img/Curation-Overview.png) - -If you want to learn more about the Curator role, you can do so by visiting [official documentation.](/network/curating) or [The Graph Academy](https://thegraph.academy/curators/). - -### 3. المفوضون Delegators - -Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. - -- Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers attract Delegators by offering them a portion of their indexing rewards and query fees. -- Delegators select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. -- Reputation within the community can also play a factor in the selection process. It’s recommended to connect with the selected Indexers via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! - -![صورة المستكشف 7](/img/Delegation-Overview.png) - -In the Delegators table you can see the active Delegators in the community and important metrics: - -- عدد المفهرسين المفوض إليهم -- التفويض الأصلي للمفوض Delegator’s original delegation -- المكافآت التي جمعوها والتي لم يسحبوها من البروتوكول -- المكافآت التي تم سحبها من البروتوكول -- كمية ال GRT التي يمتلكونها حاليا في البروتوكول -- The date they last delegated - -If you want to learn more about how to become a Delegator, check out the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). 
- -## Network - -In this section, you can see global KPIs and view the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. - -### نظره عامة - -The overview section has both all the current network metrics and some cumulative metrics over time: - -- إجمالي حصة الشبكة الحالية -- الحصة المقسمة بين المفهرسين ومفوضيهم -- إجمالي العرض ،و الصك ،وال GRT المحروقة منذ بداية الشبكة -- إجمالي مكافآت الفهرسة منذ بداية البروتوكول -- بارامترات البروتوكول مثل مكافأة التنسيق ومعدل التضخم والمزيد -- رسوم ومكافآت الفترة الحالية - -A few key details to note: - -- **Query fees represent the fees generated by the consumers**. They can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. -- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once Indexers close their allocations towards the subgraphs they’ve been indexing. So, the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). - -![صورة المستكشف 8](/img/Network-Stats.png) - -### الفترات Epochs - -In the Epochs section, you can analyze on a per-epoch basis, metrics such as: - -- بداية الفترة أو نهايتها -- مكافآت رسوم الاستعلام والفهرسة التي تم جمعها خلال فترة معينة -- حالة الفترة، والتي تشير إلى رسوم الاستعلام وتوزيعها ويمكن أن يكون لها حالات مختلفة: - - الفترة النشطة هي الفترة التي يقوم فيها المفهرسون حاليا بتخصيص الحصص وتحصيل رسوم الاستعلام - - فترات التسوية هي تلك الفترات التي يتم فيها تسوية قنوات الحالة state channels. هذا يعني أن المفهرسين يكونون عرضة للشطب إذا فتح المستخدمون اعتراضات ضدهم. 
- - فترات التوزيع هي تلك الفترات التي يتم فيها تسوية قنوات الحالة للفترات ويمكن للمفهرسين المطالبة بخصم رسوم الاستعلام الخاصة بهم. - - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers. - -![صورة المستكشف 9](/img/Epoch-Stats.png) - -## ملف تعريف المستخدم الخاص بك - -Your personal profile is the place where you can see your network activity, regardless of your role on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see the following tabs: - -### نظرة عامة على الملف الشخصي - -In this section, you can view the following: - -- Any of your current actions you've done. -- Your profile information, description, and website (if you added one). - -![صورة المستكشف 10](/img/Profile-Overview.png) - -### تبويب ال Subgraphs - -In the Subgraphs tab, you’ll see your published subgraphs. - -> This will not include any subgraphs deployed with the CLI for testing purposes. Subgraphs will only show up when they are published to the decentralized network. - -![صورة المستكشف 11](/img/Subgraphs-Overview.png) - -### تبويب الفهرسة - -In the Indexing tab, you’ll find a table with all the active and historical allocations towards subgraphs. You will also find charts where you can see and analyze your past performance as an Indexer. - -هذا القسم سيتضمن أيضا تفاصيل حول صافي مكافآت المفهرس ورسوم الاستعلام الصافي الخاصة بك. 
سترى المقاييس التالية: - -- الحصة المفوضة Delegated Stake - هي الحصة المفوضة من قبل المفوضين والتي يمكنك تخصيصها ولكن لا يمكن شطبها -- إجمالي رسوم الاستعلام Total Query Fees - هو إجمالي الرسوم التي دفعها المستخدمون مقابل الاستعلامات التي قدمتها بمرور الوقت -- مكافآت المفهرس Indexer Rewards - هو المبلغ الإجمالي لمكافآت المفهرس التي تلقيتها ك GRT -- اقتطاع الرسوم Fee Cut -هي النسبة المئوية لخصوم رسوم الاستعلام التي ستحتفظ بها عند التقسيم مع المفوضين -- اقتطاع المكافآت Rewards Cut -هي النسبة المئوية لمكافآت المفهرس التي ستحتفظ بها عند التقسيم مع المفوضين -- مملوكة Owned - هي حصتك المودعة ، والتي يمكن شطبها بسبب السلوك الضار أو غير الصحيح - -![صورة المستكشف 12](/img/Indexer-Stats.png) - -### تبويب التفويض Delegating Tab - -Delegators are important to the Graph Network. They must use their knowledge to choose an Indexer that will provide a healthy return on rewards. - -In the Delegators tab, you can find the details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. - -في النصف الأول من الصفحة ، يمكنك رؤية مخطط التفويض الخاص بك ، بالإضافة إلى مخطط المكافآت فقط. إلى اليسار ، يمكنك رؤية KPIs التي تعكس مقاييس التفويض الحالية. - -مقاييس التفويض التي ستراها هنا في علامة التبويب هذه تشمل ما يلي: - -- إجمالي مكافآت التفويض -- إجمالي المكافآت الغير محققة -- إجمالي المكافآت المحققة - -في النصف الثاني من الصفحة ، لديك جدول التفويضات. هنا يمكنك رؤية المفهرسين الذين فوضتهم ، بالإضافة إلى تفاصيلهم (مثل المكافآت المقتطعة rewards cuts، و cooldown ، الخ). - -باستخدام الأزرار الموجودة على الجانب الأيمن من الجدول ، يمكنك إدارة التفويض - تفويض المزيد أو إلغاء التفويض أو سحب التفويض بعد فترة الإذابة. - -باستخدام الأزرار الموجودة على الجانب الأيمن من الجدول ، يمكنك إدارة تفويضاتك أو تفويض المزيد أو إلغاء التفويض أو سحب التفويض بعد فترة الذوبان thawing. 
- -![صورة المستكشف 13](/img/Delegation-Stats.png) - -### تبويب التنسيق Curating - -في علامة التبويب Curation ، ستجد جميع ال subgraphs التي تشير إليها (مما يتيح لك تلقي رسوم الاستعلام). الإشارة تسمح للمنسقين التوضيح للمفهرسين ماهي ال subgraphs ذات الجودة العالية والموثوقة ، مما يشير إلى ضرورة فهرستها. - -ضمن علامة التبويب هذه ، ستجد نظرة عامة حول: - -- جميع ال subgraphs التي تقوم بتنسيقها مع تفاصيل الإشارة -- إجمالي الحصة لكل subgraph -- مكافآت الاستعلام لكل subgraph -- تحديث في تفاصيل التاريخ - -![صورة المستكشف 14](/img/Curation-Stats.png) - -## إعدادات ملف التعريف الخاص بك - -ضمن ملف تعريف المستخدم الخاص بك ، ستتمكن من إدارة تفاصيل ملفك الشخصي (مثل إعداد اسم ENS). إذا كنت مفهرسا ، فستستطيع الوصول إلى إعدادت أكثر. في ملف تعريف المستخدم الخاص بك ، ستتمكن من إعداد بارامترات التفويض والمشغلين. - -- Operators تتخذ إجراءات محدودة في البروتوكول نيابة عن المفهرس ، مثل عمليات فتح وإغلاق المخصصات. Operators هي عناوين Ethereum أخرى ، منفصلة عن محفظة staking الخاصة بهم ، مع بوابة وصول للشبكة التي يمكن للمفهرسين تعيينها بشكل شخصي -- تسمح لك بارامترات التفويض بالتحكم في توزيع GRT بينك وبين المفوضين. - -![صورة المستكشف 15](/img/Profile-Settings.png) - -As your official portal into the world of decentralized data, Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. - -
تفاصيل المحفظة
diff --git a/website/pages/ar/network/indexing.mdx b/website/pages/ar/network/indexing.mdx deleted file mode 100644 index e5cb4d8ea17d..000000000000 --- a/website/pages/ar/network/indexing.mdx +++ /dev/null @@ -1,819 +0,0 @@ ---- -title: Indexing ---- - -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. - -GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. - -يختار المفهرسون subgraphs للقيام بالفهرسة بناء على إشارة تنسيق subgraphs ، حيث أن المنسقون يقومون ب staking ل GRT وذلك للإشارة ل Subgraphs عالية الجودة. يمكن أيضا للعملاء (مثل التطبيقات) تعيين بارامترات حيث يقوم المفهرسون بمعالجة الاستعلامات ل Subgraphs وتسعير رسوم الاستعلام. - - - -## الأسئلة الشائعة - -### What is the minimum stake required to be an Indexer on the network? - -The minimum stake for an Indexer is currently set to 100K GRT. - -### What are the revenue streams for an Indexer? - -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. - -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. - -### How are indexing rewards distributed? - -Indexing rewards come from protocol inflation which is set to 3% annual issuance. 
They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** - -Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/allocation-optimizer) integrated with the indexer software stack. - -### ما هو إثبات الفهرسة (POI)؟ - -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. - -### متى يتم توزيع مكافآت الفهرسة؟ - -Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). - -### Can pending indexing rewards be monitored? 
- -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/rewards/RewardsManager.sol#L316) function that can be used to check the pending rewards for a specific allocation. - -تشتمل العديد من لوحات المعلومات التي أنشأها المجتمع على قيم المكافآت المعلقة ويمكن التحقق منها بسهولة يدويًا باتباع الخطوات التالية: - -1. Query the [mainnet subgraph](https://thegraph.com/explorer/subgraphs/9Co7EQe5PgW3ugCUJrJgRv4u9zdEuDJf8NvMWftNsBH8?view=Query&chain=arbitrum-one) to get the IDs for all active allocations: - -```graphql -query indexerAllocations { -  indexer(id: "") { -    allocations { -      activeForIndexer { -        allocations { -          id -        } -      } -    } -  } -} -``` - -استخدم Etherscan لاستدعاء `getRewards()`: - -- انتقل إلى [ واجهة Etherscan لعقد المكافآت Rewards contract ](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) - -* لاستدعاء `getRewards()`: - - Expand the **9. getRewards** dropdown. - - أدخل ** معرّف التخصيص ** في الإدخال. - - انقر فوق الزر ** الاستعلام **. - -### ما هي الاعتراضات disputes وأين يمكنني عرضها؟ - -يمكن الاعتراض على استعلامات المفهرس وتخصيصاته على The Graph أثناء فترة الاعتراض dispute. تختلف فترة الاعتراض حسب نوع الاعتراض. تحتوي الاستعلامات / الشهادات Queries/attestations على نافذة اعتراض لـ 7 فترات ، في حين أن المخصصات لها 56 فترة. بعد مرور هذه الفترات ، لا يمكن فتح اعتراضات ضد أي من المخصصات أو الاستعلامات. عند فتح الاعتراض ، يجب على الصيادين Fishermen إيداع على الأقل 10000 GRT ، والتي سيتم حجزها حتى يتم الانتهاء من الاعتراض وتقديم حل. الصيادون Fisherman هم المشاركون في الشبكة الذين يفتحون الاعتراضات. - -يمكنك عرض الاعتراضات من واجهة المستخدم في صفحة ملف تعريف المفهرس وذلك من علامة التبويب `Disputes`. - -- إذا تم رفض الاعتراض، فسيتم حرق GRT المودعة من قبل ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. -- إذا تمت تسوية الاعتراض بالتعادل، فسيتم إرجاع وديعة ال Fishermen ، ولن يتم شطب المفهرس المعترض عليه. 
-- إذا تم قبول الاعتراض، فسيتم إرجاع GRT التي أودعها الFishermen ، وسيتم شطب المفهرس المعترض عليه وسيكسب Fishermen ال 50٪ من GRT المشطوبة. - -يمكن عرض الاعتراضات في واجهة المستخدم في بروفايل المفهرس ضمن علامة التبويب `Disputes`. - -### ما هي خصومات رسوم الاستعلام ومتى يتم توزيعها؟ - -Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. - -Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. - -### What is query fee cut and indexing reward cut? - -The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. - -- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. - -- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. 
- -### How do Indexers know which subgraphs to index? - -من خلال تطبيق تقنيات متقدمة لاتخاذ قرارات فهرسة ال subgraph ، وسنناقش العديد من المقاييس الرئيسية المستخدمة لتقييم ال subgraphs في الشبكة: - -- **إشارة التنسيق Curation signal** ـ تعد نسبة إشارة تنسيق الشبكة على subgraph معين مؤشرا جيدا على الاهتمام بهذا ال subgraph، خاصة أثناء المراحل الأولى عندما يزداد حجم الاستعلام. - -- **مجموعة رسوم الاستعلام Query fees collected** ـ تعد البيانات التاريخية لحجم مجموعة رسوم الاستعلام ل subgraph معين مؤشرا جيدا للطلب المستقبلي. - -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. - -- **ال Subgraphs التي بدون مكافآت فهرسة** ـ بعض الsubgraphs لا تنتج مكافآت الفهرسة بشكل أساسي لأنها تستخدم ميزات غير مدعومة مثل IPFS أو لأنها تستعلم عن شبكة أخرى خارج الشبكة الرئيسية mainnet. سترى رسالة على ال subgraph إذا لا تنتج مكافآت فهرسة. - -### ما هي المتطلبات للهاردوير؟ - -- **صغيرة**ـ يكفي لبدء فهرسة العديد من ال subgraphs، من المحتمل أن تحتاج إلى توسيع. -- ** قياسية ** - هو الإعداد الافتراضي ، ويتم استخدامه في مثال بيانات نشر k8s / terraform. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **كبيرة** - مُعدة لفهرسة جميع ال subgraphs المستخدمة حاليا وأيضا لخدمة طلبات حركة مرور البيانات ذات الصلة. - -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | -| --- | :-: | :-: | :-: | :-: | :-: | -| صغير | 4 | 8 | 1 | 4 | 16 | -| قياسي | 8 | 30 | 1 | 12 | 48 | -| متوسط | 16 | 64 | 2 | 32 | 64 | -| كبير | 72 | 468 | 3.5 | 48 | 184 | - -### What are some basic security precautions an Indexer should take? - -- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. - -- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. - -## البنية الأساسية - -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. - -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. - -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. - -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. - -- **خدمة المفهرس Indexer service**- يتعامل مع جميع الاتصالات الخارجية المطلوبة مع الشبكة. ويشارك نماذج التكلفة وحالات الفهرسة ، ويمرر طلبات الاستعلام من البوابات gateways إلى Graph Node ، ويدير مدفوعات الاستعلام عبر قنوات الحالة مع البوابة. - -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. - -- **Prometheus metrics server** - مكونات The Graph Node والمفهرس يسجلون مقاييسهم على سيرفر المقاييس. - -ملاحظة: لدعم القياس السريع ، يستحسن فصل الاستعلام والفهرسة بين مجموعات مختلفة من العقد Nodes: عقد الاستعلام وعقد الفهرس. - -### نظرة عامة على المنافذ Ports - -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. - -#### Graph Node - -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...

/subgraphs/name/.../... | --http-port | - |
(for subgraph subscriptions) | /subgraphs/id/...

/subgraphs/name/.../... | --ws-port | - |
(for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | - -#### خدمة المفهرس - -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | - -#### وكيل المفهرس(Indexer Agent) - -| المنفذ | الغرض | المسار | CLI Argument | متغيرات البيئة | -| ------ | ----------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | API إدارة المفهرس | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | - -### قم بإعداد البنية الأساسية للسيرفر باستخدام Terraform على جوجل كلاود - -> ملاحظة: يمكن للمفهرسين كبديل استخدام خدمات أمازون ويب، أو مايكروسوفت أزور، أو علي بابا. - -#### متطلبات التثبيت - -- Google Cloud SDK -- أداة سطر أوامر Kubectl -- Terraform - -#### أنشئ مشروع Google Cloud - -- Clone or navigate to the [Indexer repository](https://github.com/graphprotocol/indexer). - -- Navigate to the `./terraform` directory, this is where all commands should be executed. - -```sh -cd terraform -``` - -- قم بالتوثيق بواسطة Google Cloud وأنشئ مشروع جديد. - -```sh -gcloud auth login -project= -gcloud projects create --enable-cloud-apis $project -``` - -- استخدم [صفحة الفوترة] في Google Cloud Console لتمكين الفوترة للمشروع الجديد. - -- قم بإنشاء Google Cloud configuration. - -```sh -proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") -gcloud config configurations create $project -gcloud config set project "$proj_id" -gcloud config set compute/region us-central1 -gcloud config set compute/zone us-central1-a -``` - -- قم بتفعيل Google Cloud APIs المطلوبة. - -```sh -gcloud services enable compute.googleapis.com -gcloud services enable container.googleapis.com -gcloud services enable servicenetworking.googleapis.com -gcloud services enable sqladmin.googleapis.com -``` - -- قم بإنشاء حساب الخدمة. 
- -```sh -svc_name= -gcloud iam service-accounts create $svc_name \ - --description="Service account for Terraform" \ - --display-name="$svc_name" -gcloud iam service-accounts list -# Get the email of the service account from the list -svc=$(gcloud iam service-accounts list --format='get(email)' ---filter="displayName=$svc_name") -gcloud iam service-accounts keys create .gcloud-credentials.json \ - --iam-account="$svc" -gcloud projects add-iam-policy-binding $proj_id \ - --member serviceAccount:$svc \ - --role roles/editor -``` - -- قم بتفعيل ال peering بين قاعدة البيانات ومجموعة Kubernetes التي سيتم إنشاؤها في الخطوة التالية. - -```sh -gcloud compute addresses create google-managed-services-default \ - --prefix-length=20 \ - --purpose=VPC_PEERING \ - --network default \ - --global \ - --description 'IP Range for peer networks.' -gcloud services vpc-peerings connect \ - --network=default \ - --ranges=google-managed-services-default -``` - -- قم بإنشاء الحد الأدنى من ملف التهيئة ل terraform (التحديث حسب الحاجة). - -```sh -indexer= -cat > terraform.tfvars < \ - -f Dockerfile.indexer-service \ - -t indexer-service:latest \ -# Indexer agent -docker build \ - --build-arg NPM_TOKEN= \ - -f Dockerfile.indexer-agent \ - -t indexer-agent:latest \ -``` - -- قم بتشغيل المكونات - -```sh -docker run -p 7600:7600 -it indexer-service:latest ... -docker run -p 18000:8000 -it indexer-agent:latest ... -``` - -**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
- -#### استخدام K8s و Terraform - -See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section - -#### الاستخدام - -> **ملاحظة**: جميع متغيرات الإعدادات الخاصة بوقت التشغيل يمكن تطبيقها إما كبارامترات للأمر عند بدء التشغيل أو باستخدام متغيرات البيئة بالتنسيق `COMPONENT_NAME_VARIABLE_NAME` (على سبيل المثال `INDEXER_AGENT_ETHEREUM`). - -#### وكيل المفهرس(Indexer Agent) - -```sh -graph-indexer-agent start \ - --ethereum \ - --ethereum-network mainnet \ - --mnemonic \ - --indexer-address \ - --graph-node-query-endpoint http://localhost:8000/ \ - --graph-node-status-endpoint http://localhost:8030/graphql \ - --graph-node-admin-endpoint http://localhost:8020/ \ - --public-indexer-url http://localhost:7600/ \ - --indexer-geo-coordinates \ - --index-node-ids default \ - --indexer-management-port 18000 \ - --metrics-port 7040 \ - --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv \ - --default-allocation-amount 100 \ - --register true \ - --inject-dai true \ - --postgres-host localhost \ - --postgres-port 5432 \ - --postgres-username \ - --postgres-password \ - --postgres-database indexer \ - --allocation-management auto \ - | pino-pretty -``` - -#### خدمة المفهرس Indexer service - -```sh -SERVER_HOST=localhost \ -SERVER_PORT=5432 \ -SERVER_DB_NAME=is_staging \ -SERVER_DB_USER= \ -SERVER_DB_PASSWORD= \ -graph-indexer-service start \ - --ethereum \ - --ethereum-network mainnet \ - --mnemonic \ - --indexer-address \ - --port 7600 \ - --metrics-port 7300 \ - --graph-node-query-endpoint http://localhost:8000/ \ - --graph-node-status-endpoint http://localhost:8030/graphql \ - --postgres-host localhost \ - --postgres-port 5432 \ - --postgres-username \ - --postgres-password \ - --postgres-database is_staging \ - --network-subgraph-endpoint http://query-node-0:8000/subgraphs/id/QmUzRg2HHMpbgf6Q4VHKNDbtBEJnyp5JWCh2gUX9AV6jXv 
\ - | pino-pretty -``` - -#### CLI المفهرس - -CLI المفهرس هو مكون إضافي لـ [`graphprotocol/graph-cli@`](https://www.npmjs.com/package/@graphprotocol/graph-cli) يمكن الوصول إليه عند `graph indexer`. - -```sh -graph indexer connect http://localhost:18000 -graph indexer status -``` - -#### Indexer management using Indexer CLI - -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. - -#### الاستخدام - -The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. - -- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) - -- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. 
This is how they are applied in the Indexer agent. - -- `graph indexer rules set [options] ...` - قم بتعيين قاعدة أو أكثر من قواعد الفهرسة. - -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. - -- `graph indexer rules stop [options] ` - توقف عن فهرسة النشر deployment وقم بتعيين ملف `decisionBasis` إلىnever أبدًا ، لذلك سيتم تخطي هذا النشر عند اتخاذ قرار بشأن عمليات النشر للفهرسة. - -- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. - -- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additional argument `--status` can be used to print out all actions of a certain status. - -- `graph indexer action queue allocate ` - Queue allocation action - -- `graph indexer action queue reallocate ` - Queue reallocate action - -- `graph indexer action queue unallocate ` - Queue unallocate action - -- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator - -- `graph indexer actions approve [ ...]` - Approve multiple actions for execution - -- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately - -جميع الأوامر التي تعرض القواعد في الخرج output يمكنها الاختيار بين تنسيقات الإخراج المدعومة (`table`, `yaml`, `json`) باستخدام `-output` argument. - -#### قواعد الفهرسة - -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. 
When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. - -على سبيل المثال ، إذا كانت القاعدة العامة لديها`minStake` من ** 5 ** (GRT) ، فأي نشر subgraph به أكثر من 5 (GRT) من الحصة المخصصة ستتم فهرستها. قواعد العتبة تتضمن `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, `minAverageQueryFees`. - -نموذج البيانات Data model: - -```graphql -type IndexingRule { - identifier: string - identifierType: IdentifierType - decisionBasis: IndexingDecisionBasis! - allocationAmount: number | null - allocationLifetime: number | null - autoRenewal: boolean - parallelAllocations: number | null - maxAllocationPercentage: number | null - minSignal: string | null - maxSignal: string | null - minStake: string | null - minAverageQueryFees: string | null - custom: string | null - requireSupported: boolean | null - } - -IdentifierType { - deployment - subgraph - group -} - -IndexingDecisionBasis { - rules - never - always - offchain -} -``` - -Example usage of indexing rule: - -``` -graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK - -graph indexer rules set QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK decisionBasis always allocationAmount 123321 allocationLifetime 14 autoRenewal false requireSupported false - -graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK - -graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK -``` - -#### Actions queue CLI - -The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. 
- -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: - -- Action added to the queue by the 3rd party optimizer tool or indexer-cli user -- Indexer can use the `indexer-cli` to view all queued actions -- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. -- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. -- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. -- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. 
- -نموذج البيانات Data model: - -```graphql -Type ActionInput { - status: ActionStatus - type: ActionType - deploymentID: string | null - allocationID: string | null - amount: string | null - poi: string | null - force: boolean | null - source: string - reason: string | null - priority: number | null -} - -ActionStatus { - queued - approved - pending - success - failed - canceled -} - -ActionType { - allocate - unallocate - reallocate - collect -} -``` - -Example usage from source: - -```bash -graph indexer actions get all - -graph indexer actions get --status queued - -graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 - -graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 - -graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae - -graph indexer actions cancel - -graph indexer actions approve 1 3 5 - -graph indexer actions execute approve -``` - -Note that supported action types for allocation management have different input requirements: - -- `Allocate` - allocate stake to a specific subgraph deployment - - - required action params: - - deploymentID - - amount - -- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - - required action params: - - allocationID - - deploymentID - - optional action params: - - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) - -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - - required action params: - - allocationID - - deploymentID - - amount - - optional action params: - - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) - -#### نماذج التكلفة Cost models - -Cost models provide dynamic pricing for queries based on market and query attributes. 
The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. - -#### Agora - -توفر لغة Agora تنسيقا مرنا للإعلان عن نماذج التكلفة للاستعلامات. نموذج سعر Agora هو سلسلة من العبارات التي يتم تنفيذها بالترتيب لكل استعلام عالي المستوى في GraphQL. بالنسبة إلى كل استعلام عالي المستوى top-level ، فإن العبارة الأولى التي تتطابق معه تحدد سعر هذا الاستعلام. - -تتكون العبارة من المسند predicate ، والذي يستخدم لمطابقة استعلامات GraphQL وتعبير التكلفة والتي عند تقييم النواتج تكون التكلفة ب GRT عشري. قيم الاستعلام الموجودة في ال argument ،قد يتم تسجيلها في المسند predicate واستخدامها في التعبير expression. يمكن أيضًا تعيين Globals وتعويضه في التعبير expression. - -مثال لتكلفة الاستعلام باستخدام النموذج أعلاه: - -``` -# This statement captures the skip value, -# uses a boolean expression in the predicate to match specific queries that use `skip` -# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global -query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; - -# This default will match any GraphQL expression. -# It uses a Global substituted into the expression to calculate cost -default => 0.1 * $SYSTEM_LOAD; -``` - -مثال على نموذج التكلفة: - -| الاستعلام | السعر | -| ---------------------------------------------------------------------------- | ------- | -| { pairs(skip: 5000) { id } } | 0.5 GRT | -| { tokens { symbol } } | 0.1 GRT | -| { pairs(skip: 5000) { id } tokens { symbol } } | 0.6 GRT | - -#### تطبيق نموذج التكلفة - -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. 
- -```sh -'indexer cost set variables '{ "SYSTEM_LOAD": 1.4 } -indexer cost set model my_model.agora -``` - -## التفاعل مع الشبكة - -### Stake in the protocol - -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. - -> Note: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools). - -Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. - -#### اعتماد التوكن tokens - -1. افتح [ تطبيق Remix ](https://remix.ethereum.org/) على المتصفح - -2. في `File Explorer` أنشئ ملفا باسم ** GraphToken.abi ** باستخدام [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). - -3. With `GraphToken.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. - -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. - -5. قم بتعيين عنوان GraphToken - الصق العنوان (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) بجوار `At Address` وانقر على الزر `At address` لتطبيق ذلك. - -6. استدعي دالة `approve(spender, amount)` للموافقة على عقد Staking. املأ `spender` بعنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) واملأ `amount` بالتوكن المراد عمل staking لها (في wei). - -#### Stake tokens - -1. افتح [ تطبيق Remix ](https://remix.ethereum.org/) على المتصفح - -2. في `File Explorer` أنشئ ملفا باسم ** Staking.abi ** باستخدام Staking ABI. - -3. 
With `Staking.abi` selected and open in the editor, switch to the `Deploy and run transactions` section in the Remix interface. - -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. - -5. عيّن عنوان عقد Staking - الصق عنوان عقد Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) بجوار `At address` وانقر على الزر `At address` لتطبيق ذلك. - -6. استدعي `stake()` لوضع GRT في البروتوكول. - -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. - -8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. - -``` -setDelegationParameters(950000, 600000, 500) -``` - -### Setting delegation parameters - -The `setDelegationParameters()` function in the [staking contract](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol) is essential for Indexers, allowing them to set parameters that define their interactions with Delegators, influencing their reward sharing and delegation capacity. - -### How to set delegation parameters - -To set the delegation parameters using Graph Explorer interface, follow these steps: - -1. Navigate to [Graph Explorer](https://thegraph.com/explorer/). -2. 
Connect your wallet. Choose multisig (such as Gnosis Safe) and then select mainnet. Note: You will need to repeat this process for Arbitrum One. -3. Connect the wallet you have as a signer. -4. Navigate to the 'Settings' section and select 'Delegation Parameters'. These parameters should be configured to achieve an effective cut within the desired range. Upon entering values in the provided input fields, the interface will automatically calculate the effective cut. Adjust these values as necessary to attain the desired effective cut percentage. -5. Submit the transaction to the network. - -> Note: This transaction will need to be confirmed by the multisig wallet signers. - -### عمر التخصيص allocation - -After being created by an Indexer a healthy allocation goes through two states. - -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L316)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. - -- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/main/packages/contracts/contracts/staking/Staking.sol#L335)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators ([learn more](/network/indexing/#how-are-indexing-rewards-distributed)). - -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. 
This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/ar/network/overview.mdx b/website/pages/ar/network/overview.mdx deleted file mode 100644 index c6fdf2fdc81f..000000000000 --- a/website/pages/ar/network/overview.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Network Overview ---- - -The Graph Network is a decentralized indexing protocol for organizing blockchain data. - -## How does it work? - -Applications use [GraphQL](/querying/graphql-api/) to query open APIs called subgraphs and retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. - -## Specifics - -The Graph Network consists of Indexers, Curators, and Delegators that provide services to the network and serve data to web3 applications. - -![اقتصاد الـ Token](/img/Network-roles@2x.png) - -### Economics - -To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20, which is used to allocate resources in the network. - -Active Indexers, Curators, and Delegators can provide services and earn income from the network. The income they earn is proportional to the amount of work they perform and their GRT stake. diff --git a/website/pages/ar/new-chain-integration.mdx b/website/pages/ar/new-chain-integration.mdx deleted file mode 100644 index 75df818160ce..000000000000 --- a/website/pages/ar/new-chain-integration.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: New Chain Integration ---- - -Chains can bring subgraph support to their ecosystem by starting a new `graph-node` integration. Subgraphs are a powerful indexing tool opening a world of possibilities for developers. Graph Node already indexes data from the chains listed here. 
If you are interested in a new integration, there are 2 integration strategies: - -1. **EVM JSON-RPC** -2. **Firehose**: All Firehose integration solutions include Substreams, a large-scale streaming engine based off Firehose with native `graph-node` support, allowing for parallelized transforms. - -> Note that while the recommended approach is to develop a new Firehose for all new chains, it is only required for non-EVM chains. - -## Integration Strategies - -### 1. EVM JSON-RPC - -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. - -#### اختبار استدعاء إجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت لآلة الإيثريوم الافتراضية (EVM JSON-RPC) - -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON-RPC methods: - -- `eth_getLogs` -- `eth_call` (for historical blocks, with EIP-1898 - requires archive node) -- `eth_getBlockByNumber` -- `eth_getBlockByHash` -- `net_version` -- `eth_getTransactionReceipt`، ضمن طلب دفعة استدعاء الإجراء عن بُعد باستخدام تمثيل كائنات جافا سكريبت -- `trace_filter` *(optionally required for Graph Node to support call handlers)* - -### 2. Firehose Integration - -[Firehose](https://firehose.streamingfast.io/firehose-setup/overview) is a next-generation extraction layer. It collects history in flat files and streams in real time. Firehose technology replaces those polling API calls with a stream of data utilizing a push model that sends data to the indexing node faster. This helps increase the speed of syncing and indexing. - -The primary method to integrate the Firehose into chains is to use an RPC polling strategy. Our polling algorithm will predict when a new block will arrive and increase the rate at which it checks for a new block near that time, making it a very low-latency and efficient solution. 
For help with the integration and maintenance of the Firehose, contact the [StreamingFast team](https://www.streamingfast.io/firehose-integration-program). New chains and their integrators will appreciate the [fork awareness](https://substreams.streamingfast.io/documentation/consume/reliability-guarantees) and massive parallelized indexing capabilities that Firehose and Substreams bring to their ecosystem. - -> NOTE: All integrations done by the StreamingFast team include maintenance for the Firehose replication protocol into the chain's codebase. StreamingFast tracks any changes and releases binaries when you change code and when StreamingFast changes code. This includes releasing Firehose/Substreams binaries for the protocol, maintaining Substreams modules for the block model of the chain, and releasing binaries for the blockchain node with instrumentation if need be. - -#### Specific Firehose Instrumentation for EVM (`geth`) chains - -For EVM chains, there exists a deeper level of data that can be achieved through the `geth` [live-tracer](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.0), a collaboration between Go-Ethereum and StreamingFast, in building a high-throughput and rich transaction tracing system. The Live Tracer is the most comprehensive solution, resulting in [Extended](https://streamingfastio.medium.com/new-block-model-to-accelerate-chain-integration-9f65126e5425) block details. This enables new indexing paradigms, like pattern matching of events based on state changes, calls, parent call trees, or triggering of events based on changes to the actual variables in a smart contract. - -![Base block vs Extended block](/img/extended-vs-base-substreams-blocks.png) - -> NOTE: This improvement upon the Firehose requires chains make use of the EVM engine `geth version 1.13.0` and up. 
- -## EVM considerations - Difference between JSON-RPC & Firehose - -While the JSON-RPC and Firehose are both suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](https://substreams.streamingfast.io). Supporting Substreams allows developers to build [Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs) for the new chain, and has the potential to improve the performance of your subgraphs. Additionally, Firehose — as a drop-in replacement for the JSON-RPC extraction layer of `graph-node` — reduces by 90% the number of RPC calls required for general indexing. - -- All those `getLogs` calls and roundtrips get replaced by a single stream arriving into the heart of `graph-node`; a single block model for all subgraphs it processes. - -> NOTE: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that `eth_calls` are not a good practice for developers) - -## تكوين عقدة الغراف - -Configuring Graph Node is as easy as preparing your local environment. Once your local environment is set, you can test the integration by locally deploying a subgraph. - -1. [استنسخ عقدة الغراف](https://github.com/graphprotocol/graph-node) - -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON-RPC compliant URL - - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. - -3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ - -### Testing an EVM JSON-RPC by locally deploying a subgraph - -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. قم بإنشاء مثالًا بسيطًا للغراف الفرعي. 
بعض الخيارات المتاحة هي كالتالي: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing `dataSources.network` to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` - -إذا لم تكن هناك أخطاء يجب أن يقوم عقدة الغراف بمزامنة الغراف الفرعي المنشور. قم بمنحه بعض الوقت لإتمام عملية المزامنة، ثم قم بإرسال بعض استعلامات لغة الإستعلام للغراف (GraphQL) إلى نقطة نهاية واجهة برمجة التطبيقات الموجودة في السجلات. - -## Substreams-powered Subgraphs - -For StreamingFast-led Firehose/Substreams integrations, basic support for foundational Substreams modules (e.g. decoded transactions, logs and smart-contract events) and Substreams codegen tools are included. These tools enable the ability to enable [Substreams-powered subgraphs](/sps/introduction). Follow the [How-To Guide](https://substreams.streamingfast.io/documentation/how-to-guides/intro-your-first-application) and run `substreams codegen subgraph` to experience the codegen tools for yourself. 
diff --git a/website/pages/ar/publishing/_meta.js b/website/pages/ar/publishing/_meta.js deleted file mode 100644 index eb06f56f912a..000000000000 --- a/website/pages/ar/publishing/_meta.js +++ /dev/null @@ -1,5 +0,0 @@ -import meta from '../../en/publishing/_meta.js' - -export default { - ...meta, -} diff --git a/website/pages/ar/publishing/publishing-a-subgraph.mdx b/website/pages/ar/publishing/publishing-a-subgraph.mdx deleted file mode 100644 index 673160e705f0..000000000000 --- a/website/pages/ar/publishing/publishing-a-subgraph.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Publishing a Subgraph to the Decentralized Network ---- - -Once you have [deployed your subgraph to Subgraph Studio](/deploying/deploying-a-subgraph-to-studio) and it's ready to go into production, you can publish it to the decentralized network. - -When you publish a subgraph to the decentralized network, you make it available for: - -- [Curators](/network/curating) to begin curating it. -- [Indexers](/network/indexing) to begin indexing it. - - - -Check out the list of [supported networks](/developing/supported-networks). - -## Publishing from Subgraph Studio - -1. Go to the [Subgraph Studio](https://thegraph.com/studio/) dashboard -2. Click on the **Publish** button -3. Your subgraph will now be visible in [Graph Explorer](https://thegraph.com/explorer/). - -All published versions of an existing subgraph can: - -- Be published to Arbitrum One. [Learn more about The Graph Network on Arbitrum](/arbitrum/arbitrum-faq). - -- Index data on any of the [supported networks](/developing/supported-networks), regardless of the network on which the subgraph was published. - -### Updating metadata for a published subgraph - -- After publishing your subgraph to the decentralized network, you can update the metadata anytime in Subgraph Studio. -- Once you’ve saved your changes and published the updates, they will appear in Graph Explorer. 
-- It's important to note that this process will not create a new version since your deployment has not changed. - -## Publishing from the CLI - -As of version 0.73.0, you can also publish your subgraph with the [`graph-cli`](https://github.com/graphprotocol/graph-tooling/tree/main/packages/cli). - -1. Open the `graph-cli`. -2. Use the following commands: `graph codegen && graph build` then `graph publish`. -3. A window will open, allowing you to connect your wallet, add metadata, and deploy your finalized subgraph to a network of your choice. - -![cli-ui](/img/cli-ui.png) - -### Customizing your deployment - -You can upload your subgraph build to a specific IPFS node and further customize your deployment with the following flags: - -``` -USAGE - $ graph publish [SUBGRAPH-MANIFEST] [-h] [--protocol-network arbitrum-one|arbitrum-sepolia --subgraph-id ] [-i ] [--ipfs-hash ] [--webapp-url - ] - -FLAGS - -h, --help Show CLI help. - -i, --ipfs= [default: https://api.thegraph.com/ipfs/api/v0] Upload build results to an IPFS node. - --ipfs-hash= IPFS hash of the subgraph manifest to deploy. - --protocol-network=