diff --git a/.github/workflows/cron-staging.yml b/.github/workflows/cron-staging.yml index 3176c910cb..844c7683f5 100644 --- a/.github/workflows/cron-staging.yml +++ b/.github/workflows/cron-staging.yml @@ -5,12 +5,12 @@ on: workflow_dispatch: jobs: - terra-main-tests: + qiskit-main-tests: name: tests-python${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [3.8, 3.9, "3.10", "3.11"] + python-version: [3.8, "3.11"] os: ["ubuntu-latest", "macOS-latest", "windows-latest"] steps: - name: Print Concurrency Group @@ -36,11 +36,43 @@ jobs: ${{ runner.os }}-${{ matrix.python-version }} - name: Install Deps run: python -m pip install -U tox setuptools virtualenv wheel - - name: Install and Run Tests - run: tox -e terra-main + - name: Install and Run Tests (Windows and Linux) + run: tox -e qiskit-main if: runner.os != 'macOS' - - name: Install and Run Tests - run: tox -e terra-main + - name: Install and Run Tests (Macs only) + run: tox -e qiskit-main if: runner.os == 'macOS' env: - OMP_NUM_THREADS: 1 \ No newline at end of file + TEST_TIMEOUT: 120 + OMP_NUM_THREADS: 1 + docs: + name: docs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: 3.11 + - name: Pip cache + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-docs-${{ hashFiles('setup.py','requirements.txt','requirements-extras.txt','requirements-dev.txt','constraints.txt') }} + - name: Install Deps + run: | + python -m pip install -U tox + sudo apt-get install -y pandoc graphviz + - name: Build Docs + run: tox -edocs-qiskit-main + - name: Compress Artifacts + run: | + mkdir artifacts + tar -Jcvf html_docs.tar.xz docs/_build/html + mv html_docs.tar.xz artifacts/. 
+ - uses: actions/upload-artifact@v3 + with: + name: html_docs + path: artifacts diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0649ab9d55..a0f990eaab 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,7 +15,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - python-version: [3.8, 3.9, "3.10", "3.11"] + python-version: [3.8, "3.11"] os: ["ubuntu-latest", "macOS-latest", "windows-latest"] steps: - name: Print Concurrency Group @@ -39,25 +39,27 @@ jobs: ${{ runner.os }}-${{ matrix.python-version }}-pip-tests- ${{ runner.os }}-${{ matrix.python-version }}-pip- ${{ runner.os }}-${{ matrix.python-version }} + - name: Stestr cache + uses: actions/cache@v3 + with: + path: .stestr + key: stestr-${{ runner.os }}-${{ matrix.python-version }} + restore-keys: | + stestr-${{ runner.os }}- + stestr- - name: Install Deps - run: python -m pip install -U "tox==3.27.1" setuptools virtualenv wheel + run: python -m pip install -U tox setuptools virtualenv wheel stestr - name: Install and Run Tests (Windows and Linux) run: tox -e py if: runner.os != 'macOS' - name: Install and Run Tests (Macs only) - run: tox -e cover + run: tox -e py if: runner.os == 'macOS' env: + TEST_TIMEOUT: 120 OMP_NUM_THREADS: 1 - - name: Report coverage to coveralls.io (Macs only) - if: runner.os == 'macOS' - uses: coverallsapp/github-action@v2 - env: - ACTIONS_RUNNER_DEBUG: 1 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - flag-name: unit-tests_python${{ matrix.python-version }}-${{ matrix.os }} - path-to-lcov: coverage.lcov + - name: Clean up stestr cache + run: stestr history remove all lint: name: lint diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ddd408f251..8c7a18ce1d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,6 +7,9 @@ jobs: wheel-build: name: Build and Publish Release Artifacts runs-on: ubuntu-latest + environment: release + permissions: + id-token: write steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -14,7 +17,7 @@ jobs: with: python-version: '3.8' - name: Install Deps - run: pip install -U twine wheel + run: pip install -U wheel - name: Build Artifacts run: | python setup.py sdist @@ -24,7 +27,4 @@ jobs: with: path: ./dist/qiskit* - name: Publish to PyPi - env: - TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} - TWINE_USERNAME: qiskit - run: twine upload dist/qiskit* + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.pylintrc b/.pylintrc index 86e005682e..22f47fd59f 100644 --- a/.pylintrc +++ b/.pylintrc @@ -70,7 +70,10 @@ disable=fixme, # disabled as TODOs would show up as warnings no-else-return, # relax "elif" after a clause with a return docstring-first-line-empty, # relax docstring style import-outside-toplevel, - assigning-non-slot # https://github.com/Qiskit/qiskit-terra/pull/7347#issuecomment-985007311 + cyclic-import, # This checker raises on all module pairs that import each other, + # even submodules that only import already loaded objects from a + # parent module, a common pattern in qiskit-experiments. 
+ assigning-non-slot # https://github.com/Qiskit/qiskit/pull/7347#issuecomment-985007311 diff --git a/.stestr.conf b/.stestr.conf index 55302a1601..460ee43afa 100644 --- a/.stestr.conf +++ b/.stestr.conf @@ -1,2 +1,3 @@ [DEFAULT] test_path=./test +parallel_class=True \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 20412c851b..cf33b8ffa7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -104,27 +104,25 @@ https://stestr.readthedocs.io/en/stable/MANUAL.html#test-selection If you want to run a single test module, test class, or individual test method you can do this faster with the `-n`/`--no-discover` option. For example, to run a module: ``` -tox -- -n test.python.test_examples +tox -epy310 -- -n test.framework.test_composite ``` -Or to run the same module by path: -``` -tox -- -n test/python/test_examples.py -``` To run a class: - ``` -tox -- -n test.python.test_examples.TestPythonExamples +tox -epy310 -- -n test.framework.test_composite.TestCompositeExperimentData ``` + To run a method: ``` -tox -- -n test.python.test_examples.TestPythonExamples.test_all_examples +tox -epy310 -- -n test.framework.test_composite.TestCompositeExperimentData.test_composite_save_load ``` +Note that tests will fail automatically if they do not finish execution within 60 seconds. + #### STDOUT/STDERR and logging capture -When running tests in parallel using `stestr` either via tox, the Makefile (`make -test_ci`), or in CI, we set the env variable `QISKIT_TEST_CAPTURE_STREAMS`, which will +When running tests in parallel using `stestr` either via tox +or in CI, we set the env variable `QISKIT_TEST_CAPTURE_STREAMS`, which will capture any text written to stdout, stderr, and log messages and add them as attachments to the tests run so output can be associated with the test case it originated from. However, if you run tests with `stestr` outside of these mechanisms, by default the @@ -136,6 +134,18 @@ stdlib unittest runner, a similar result can be accomplished by using the [`--buffer`](https://docs.python.org/3/library/unittest.html#command-line-options) option (e.g. `python -m unittest discover --buffer ./test/python`). +#### Other testing related settings + +The test code defines some environment variables that may occasionally be useful to set: + ++ `TEST_TIMEOUT`: An integer representing the maximum time a test can take + before it is considered a failure. ++ `QE_USE_TESTTOOLS`: Set this variable to `FALSE`, `0`, or `NO` to have the + tests use `unittest.TestCase` as the base class. Otherwise, the default is +`testtools.TestCase` which is an extension of `unittest.TestCase`. In some +situations, a developer may wish to use a workflow that is not compatible with +the `testtools` extensions. 
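+
+As a rough sketch of how these variables fit together (the values and the
+`unittest`-based invocation below are illustrative only, not a supported
+recipe), they can be set before the test modules are imported, since the
+base test class is chosen at import time:
+
+```python
+import os
+
+# Illustrative values: allow each test 200 seconds before it is failed, and
+# use unittest.TestCase rather than testtools.TestCase as the base class.
+os.environ["TEST_TIMEOUT"] = "200"
+os.environ["QE_USE_TESTTOOLS"] = "FALSE"
+
+import unittest
+
+# Discover the suite from the repository's test path (see .stestr.conf) and
+# buffer stdout/stderr per test, like the --buffer option mentioned above.
+suite = unittest.TestLoader().discover("./test")
+unittest.TextTestRunner(buffer=True).run(suite)
+```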
+ ### Code style The qiskit-experiments repository uses `black` for code formatting and style and diff --git a/README.md b/README.md index f165a1c865..b9a3e8020c 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,6 @@ [![License](https://img.shields.io/github/license/Qiskit-Extensions/qiskit-experiments.svg)](https://opensource.org/licenses/Apache-2.0) [![Release](https://img.shields.io/github/release/Qiskit-Extensions/qiskit-experiments.svg)](https://github.com/Qiskit-Extensions/qiskit-experiments/releases) ![Python](https://img.shields.io/pypi/pyversions/qiskit-experiments.svg) -[![Coverage Status](https://coveralls.io/repos/github/Qiskit-Extensions/qiskit-experiments/badge.svg?branch=main)](https://coveralls.io/github/Qiskit-Extensions/qiskit-experiments?branch=main) [![DOI](https://joss.theoj.org/papers/10.21105/joss.05329/status.svg)](https://doi.org/10.21105/joss.05329) **Qiskit Experiments** is a repository that builds tools for building, running, diff --git a/constraints.txt b/constraints.txt index cd066084df..e69de29bb2 100644 --- a/constraints.txt +++ b/constraints.txt @@ -1,4 +0,0 @@ -# Numpy 1.25 deprecated some behaviours that we used, and caused some -# tests to flake. See https://github.com/Qiskit/qiskit-terra/issues/10305, -# remove pin when resolving that. -numpy<1.25 \ No newline at end of file diff --git a/docs/_ext/custom_styles/formatter.py b/docs/_ext/custom_styles/formatter.py index a218f9476e..5a3b2d45af 100644 --- a/docs/_ext/custom_styles/formatter.py +++ b/docs/_ext/custom_styles/formatter.py @@ -38,7 +38,7 @@ def format_header(self, lines: List[str]) -> List[str]: def format_overview(self, lines: List[str]) -> List[str]: """Format overview section.""" format_lines = [ - "" + "", ".. rubric:: Overview", "", ] @@ -167,7 +167,7 @@ def format_analysis_opts(self, lines: List[str]) -> List[str]: format_lines = [ ".. rubric:: Analysis options", "", - "These are the keyword arguments of :meth:`run` method.", + "These are the keyword arguments of the :meth:`run` method.", "", ] for line in _write_options(lines, self.indent): diff --git a/docs/_ext/custom_styles/option_parser.py b/docs/_ext/custom_styles/option_parser.py index bb11b5645b..da9b1e4c70 100644 --- a/docs/_ext/custom_styles/option_parser.py +++ b/docs/_ext/custom_styles/option_parser.py @@ -26,7 +26,7 @@ from sphinx.ext.napoleon import GoogleDocstring -_parameter_doc_regex = re.compile(r'(.+?)\(\s*(.*[^\s]+)\s*\):(.*[^\s]+)') +_parameter_doc_regex = re.compile(r"(.+?)\(\s*(.*[^\s]+)\s*\):(.*[^\s]+)") class QiskitExperimentsOptionsDocstring(GoogleDocstring): @@ -201,8 +201,11 @@ def _value_repr(value: Any) -> str: return f"{{{dict_repr}}}" if value.__class__.__module__ == "builtins": return f":obj:`{value}`" - if value.__class__.__module__.startswith("qiskit"): + if value.__class__.__module__ and value.__class__.__module__.startswith("qiskit"): return f"Instance of :class:`.{value.__class__.__name__}`" + # for singleton gates that don't have directly accessible module names + if hasattr(value, "base_class") and value.base_class.__module__.startswith("qiskit"): + return f"Instance of :class:`.{value.base_class.__name__}`" if callable(value): return f"Callable :func:`{value.__name__}`" if isinstance(value, np.ndarray): diff --git a/docs/_templates/autosummary/analysis.rst b/docs/_templates/autosummary/analysis.rst index 222df215af..d8811523ac 100644 --- a/docs/_templates/autosummary/analysis.rst +++ b/docs/_templates/autosummary/analysis.rst @@ -17,11 +17,9 @@ .. rubric:: Attributes - .. 
autosummary:: - :toctree: ../stubs/ {% for item in all_attributes %} {%- if not item.startswith('_') %} - {{ name }}.{{ item }} + .. autoattribute:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% endif %} @@ -32,16 +30,14 @@ .. rubric:: Methods - .. autosummary:: - :toctree: ../stubs/ {% for item in all_methods %} {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% for item in inherited_members %} {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} diff --git a/docs/_templates/autosummary/class.rst b/docs/_templates/autosummary/class.rst index f6e8a53f58..fd25d5706e 100644 --- a/docs/_templates/autosummary/class.rst +++ b/docs/_templates/autosummary/class.rst @@ -12,58 +12,37 @@ :no-inherited-members: :no-special-members: - {% block attributes_summary %} - {% if attributes %} +{% block attributes_summary %} - {# This counter lets us only render the heading if there's at least - one valid entry. #} - {% set count = namespace(value=0) %} + {% set wanted_attributes = [] %} + {% for item in attributes%} + {%- if not item.startswith('_') %} + {% set _ = wanted_attributes.append(item)%} + {%- endif -%} + {%- endfor %} - {% for item in attributes %} - {% if not item.startswith('_') %} - {% set count.value = count.value + 1 %} - {% if count.value == 1 %} + {% if wanted_attributes %} .. rubric:: Attributes - - .. autosummary:: - :toctree: ../stubs/ - {% endif %} - - {{ name }}.{{ item }} - {% endif %} - {% endfor %} + {% for item in wanted_attributes %} + .. autoattribute:: {{ name }}.{{ item }} + {%- endfor %} {% endif %} - {% endblock %} +{% endblock %} - {% block methods_summary %} - {% if methods %} +{% block methods_summary %} - {% set count = namespace(value=0) %} + {% set wanted_methods = [] %} {% for item in all_methods %} - {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {% set count.value = count.value + 1 %} - {% if count.value == 1 %} - .. rubric:: Methods - - .. autosummary:: - :toctree: ../stubs/ - {% endif %} - {{ name }}.{{ item }} + {% set _ = wanted_methods.append(item)%} {%- endif -%} {%- endfor %} - {% for item in inherited_members %} - {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {% set count.value = count.value + 1 %} - {% if count.value == 1 %} - .. rubric:: Methods - .. autosummary:: - :toctree: ../stubs/ - {% endif %} - {{ name }}.{{ item }} - {%- endif -%} + {% if wanted_methods%} + .. rubric:: Methods + {% for item in wanted_methods %} + .. automethod:: {{ name }}.{{ item }} {%- endfor %} {% endif %} - {% endblock %} +{% endblock %} \ No newline at end of file diff --git a/docs/_templates/autosummary/class_no_inherited_members.rst b/docs/_templates/autosummary/class_no_inherited_members.rst new file mode 100644 index 0000000000..9e3b9339c9 --- /dev/null +++ b/docs/_templates/autosummary/class_no_inherited_members.rst @@ -0,0 +1,52 @@ +{# This is identical to class.rst, except for the filtering of the inherited_members. -#} + +{% if referencefile %} +.. include:: {{ referencefile }} +{% endif %} + +{{ objname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. 
autoclass:: {{ objname }} + :no-members: + :no-inherited-members: + :no-special-members: + +{% block attributes_summary %} + + {% set wanted_attributes = [] %} + {% for item in attributes%} + {%- if not item.startswith('_') %} + {% set _ = wanted_attributes.append(item)%} + {%- endif -%} + {%- endfor %} + + {% if wanted_attributes%} + .. rubric:: Attributes + {% for item in wanted_attributes %} + .. autoattribute:: {{ name }}.{{ item }} + {%- endfor %} + {% endif %} +{% endblock %} + +{% block methods_summary %} + + {% set wanted_methods = [] %} + {% for item in all_methods %} + {%- if item not in inherited_members %} + {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} + {% set _ = wanted_methods.append(item)%} + {%- endif -%} + {%- endif -%} + {%- endfor %} + + {% if wanted_methods %} + .. rubric:: Methods + {% for item in wanted_methods %} + .. automethod:: {{ name }}.{{ item }} + {%- endfor %} + + {% endif %} +{% endblock %} \ No newline at end of file diff --git a/docs/_templates/autosummary/drawer.rst b/docs/_templates/autosummary/drawer.rst index a17d57f6da..03501a5cd0 100644 --- a/docs/_templates/autosummary/drawer.rst +++ b/docs/_templates/autosummary/drawer.rst @@ -17,11 +17,9 @@ .. rubric:: Attributes - .. autosummary:: - :toctree: ../stubs/ {% for item in all_attributes %} {%- if not item.startswith('_') %} - {{ name }}.{{ item }} + .. autoattribute:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% endif %} @@ -32,16 +30,14 @@ .. rubric:: Methods - .. autosummary:: - :toctree: ../stubs/ {% for item in all_methods %} {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% for item in inherited_members %} {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} diff --git a/docs/_templates/autosummary/experiment.rst b/docs/_templates/autosummary/experiment.rst index 01800ea10b..0de1a1c3c3 100644 --- a/docs/_templates/autosummary/experiment.rst +++ b/docs/_templates/autosummary/experiment.rst @@ -17,11 +17,9 @@ .. rubric:: Attributes - .. autosummary:: - :toctree: ../stubs/ {% for item in all_attributes %} {%- if not item.startswith('_') %} - {{ name }}.{{ item }} + .. autoattribute:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% endif %} @@ -32,16 +30,14 @@ .. rubric:: Methods - .. autosummary:: - :toctree: ../stubs/ {% for item in all_methods %} {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% for item in inherited_members %} {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} diff --git a/docs/_templates/autosummary/plotter.rst b/docs/_templates/autosummary/plotter.rst index 83e19addbe..0b5cdeefaa 100644 --- a/docs/_templates/autosummary/plotter.rst +++ b/docs/_templates/autosummary/plotter.rst @@ -17,11 +17,9 @@ .. rubric:: Attributes - .. autosummary:: - :toctree: ../stubs/ {% for item in all_attributes %} {%- if not item.startswith('_') %} - {{ name }}.{{ item }} + .. autoattribute:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% endif %} @@ -32,17 +30,14 @@ .. rubric:: Methods - .. 
autosummary:: - :toctree: ../stubs/ - {% for item in all_methods %} {%- if not item.startswith('_') or item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} {% for item in inherited_members %} {%- if item in ['__call__', '__mul__', '__getitem__', '__len__'] %} - {{ name }}.{{ item }} + .. automethod:: {{ name }}.{{ item }} {%- endif -%} {%- endfor %} diff --git a/docs/conf.py b/docs/conf.py index 527c1b7da0..b2165afb6b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -174,6 +174,7 @@ "qiskit_ibm_provider": ("https://qiskit.org/ecosystem/ibm-provider/", None), "qiskit_aer": ("https://qiskit.org/ecosystem/aer", None), "qiskit_dynamics": ("https://qiskit.org/documentation/dynamics", None), + "qiskit_ibm_runtime": ("https://qiskit.org/ecosystem/ibm-runtime/", None), } diff --git a/docs/howtos/cloud_service.rst b/docs/howtos/cloud_service.rst index 5f06517eff..a244232ca7 100644 --- a/docs/howtos/cloud_service.rst +++ b/docs/howtos/cloud_service.rst @@ -4,7 +4,7 @@ Save and load experiment data with the cloud service .. note:: This guide is only for those who have access to the cloud service. You can check whether you do by logging into the IBM Quantum interface - and seeing if you can see the `database `__. + and seeing if you can see the `database `__. Problem ------- @@ -44,13 +44,13 @@ backend and not a simulator to be able to save the experiment data. This is done .. jupyter-output:: You can view the experiment online at - https://quantum-computing.ibm.com/experiments/10a43cb0-7cb9-41db-ad74-18ea6cf63704 + https://quantum.ibm.com/experiments/10a43cb0-7cb9-41db-ad74-18ea6cf63704 Loading ~~~~~~~ Let's load a `previous T1 -experiment `__ +experiment `__ (requires login to view), which we've made public by editing the ``Share level`` field: .. jupyter-input:: @@ -143,7 +143,7 @@ The :meth:`~.ExperimentData.auto_save` feature automatically saves changes to th .. jupyter-output:: - You can view the experiment online at https://quantum-computing.ibm.com/experiments/cdaff3fa-f621-4915-a4d8-812d05d9a9ca + You can view the experiment online at https://quantum.ibm.com/experiments/cdaff3fa-f621-4915-a4d8-812d05d9a9ca Setting ``auto_save = True`` works by triggering :meth:`.ExperimentData.save`. @@ -188,12 +188,5 @@ Web interface ~~~~~~~~~~~~~ You can also view experiment results as well as change the tags and share level at the `IBM Quantum Experiments -pane `__ -on the cloud. The documentation below explains how to view, search, and share experiment -data entries. - -See also --------- - -* `Experiments web interface documentation `__ - +pane `__ +on the cloud. diff --git a/docs/howtos/experiment_times.rst b/docs/howtos/experiment_times.rst index bc1e331d2c..8be8b1dc8b 100644 --- a/docs/howtos/experiment_times.rst +++ b/docs/howtos/experiment_times.rst @@ -24,7 +24,7 @@ are all of type ``datetime.datetime`` and in your local timezone: .. note:: The below attributes are only relevant for those who have access to the cloud service. You can check whether you do by logging into the IBM Quantum interface - and seeing if you can see the `database `__. + and seeing if you can see the `database `__. - :attr:`.ExperimentData.creation_datetime` is the time when the experiment data was saved via the service. This defaults to ``None`` if experiment data has not yet been saved. 
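To see the save-time attributes above end to end, here is a non-executable sketch in the
style of the surrounding how-to snippets. It assumes cloud service access; the
``ibm_nairobi`` backend name is borrowed from the runtime-sessions example elsewhere in
this patch, and the :class:`.T1` setup from the getting-started tutorial:

.. jupyter-input::

    import numpy as np
    from qiskit_ibm_provider import IBMProvider
    from qiskit_experiments.library import T1

    provider = IBMProvider()
    backend = provider.get_backend("ibm_nairobi")

    exp = T1(physical_qubits=(0,), delays=np.arange(1e-6, 600e-6, 50e-6))
    exp_data = exp.run(backend).block_for_results()

    print(exp_data.creation_datetime)  # None until the data is saved
    exp_data.save()
    print(exp_data.creation_datetime)  # a datetime.datetime in the local timezone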
diff --git a/docs/howtos/figure_generation.rst b/docs/howtos/figure_generation.rst new file mode 100644 index 0000000000..be346a1144 --- /dev/null +++ b/docs/howtos/figure_generation.rst @@ -0,0 +1,45 @@ +Control figure generation +========================= + +Problem +------- + +You want to change the default behavior where figures are generated with every experiment. + +Solution +-------- + +For a single non-composite experiment, figure generation can be switched off by setting the analysis +option ``plot`` to ``False``: + +.. jupyter-input:: + + experiment.analysis.set_options(plot = False) + +For composite experiments, there is a ``generate_figures`` analysis option which controls how child figures are +generated. There are three options: + +- ``always``: The default behavior, generate figures for each child experiment. +- ``never``: Never generate figures for any child experiment. +- ``selective``: Only generate figures for analysis results where ``quality`` is ``bad``. This is useful + for large composite experiments where you only want to examine qubits with problems. + +This parameter should be set on the analysis of a composite experiment before the analysis runs: + +.. jupyter-input:: + + parallel_exp = ParallelExperiment( + [T1(physical_qubits=(i,), delays=delays) for i in range(2)] + ) + parallel_exp.analysis.set_options(generate_figures="selective") + +Discussion +---------- + +These options are useful for large composite experiments, where generating all figures incurs a significant +overhead. + +See Also +-------- + +* The `Visualization tutorial `_ discusses how to customize figures diff --git a/docs/howtos/rerun_analysis.rst b/docs/howtos/rerun_analysis.rst index 2f7a1b33d6..b1a7107b6f 100644 --- a/docs/howtos/rerun_analysis.rst +++ b/docs/howtos/rerun_analysis.rst @@ -37,12 +37,14 @@ job IDs: expdata = ExperimentData(experiment = experiment) expdata.add_jobs([provider.retrieve_job(job_id) for job_id in job_ids]) - experiment.analysis.run(expdata) + experiment.analysis.run(expdata, replace_results=True) # Block execution of subsequent code until analysis is complete expdata.block_for_results() -``expdata`` will be the new experiment data object containing results of the rerun analysis. +``expdata`` will be the new experiment data object containing results of the rerun analysis. Note that if +``replace_results`` isn't set, running the analysis will return a new :class:`.ExperimentData` object +instead of overwriting the existing one. If you have the job data in the form of a :class:`~qiskit.result.Result` object, you can invoke the :meth:`.add_data` method instead of :meth:`.add_jobs`: @@ -66,7 +68,7 @@ contain correct results. In the case where jobs are not directly accessible from the provider but you've downloaded the jobs from the -`IQS dashboard `_, you can load them from +`IQS dashboard `_, you can load them from the downloaded directory into :class:`~qiskit.result.Result` objects with this code: .. jupyter-input:: @@ -115,7 +117,7 @@ first component experiment. 
data = ExperimentData(experiment=pexp) data.add_jobs([provider.retrieve_job(job_id) for job_id in job_ids]) - pexp.analysis.run(data) + pexp.analysis.run(data, replace_results=True) See Also -------- diff --git a/docs/howtos/runtime_sessions.rst b/docs/howtos/runtime_sessions.rst new file mode 100644 index 0000000000..a9519ba3a2 --- /dev/null +++ b/docs/howtos/runtime_sessions.rst @@ -0,0 +1,41 @@ +Use Experiments with Runtime sessions +===================================== + +Problem +------- + +You want to run experiments in a `Runtime session +`_ so that jobs can run in close temporal proximity. + +Solution +-------- + +Use the :class:`~qiskit_ibm_provider.IBMBackend` object in ``qiskit-ibm-provider``, which supports sessions. + +In this example, we will set the ``max_circuits`` property to an artificially low value so that the experiment will be +split into multiple jobs that run sequentially in a single session. When running real experiments with a +large number of circuits that can't fit in a single job, it may be helpful to follow this usage pattern: + +.. jupyter-input:: + + from qiskit_ibm_provider import IBMProvider + from qiskit_experiments.library.tomography import ProcessTomography + from qiskit import QuantumCircuit + + provider = IBMProvider() + backend = provider.get_backend("ibm_nairobi") + qc = QuantumCircuit(1) + qc.x(0) + + with backend.open_session() as session: + exp = ProcessTomography(qc) + exp.set_experiment_options(max_circuits=3) + exp_data = exp.run(backend) + exp_data.block_for_results() + # Calling cancel because session.close() is not available for qiskit-ibm-provider<=0.7.2. + # It is safe to call cancel since block_for_results() ensures there are no outstanding jobs + # still running that would be canceled. + session.cancel() + +Note that runtime primitives are not currently supported natively in Qiskit Experiments, so +the ``backend.run()`` path is required to run experiments. diff --git a/docs/manuals/characterization/tphi.rst b/docs/manuals/characterization/tphi.rst index d3ac38078b..3e0f25503e 100644 --- a/docs/manuals/characterization/tphi.rst +++ b/docs/manuals/characterization/tphi.rst @@ -54,11 +54,11 @@ relaxation time estimate. We can see that the component experiments of the batch .. jupyter-execute:: exp = Tphi(physical_qubits=(0,), delays_t1=delays_t1, delays_t2=delays_t2, num_echoes=1) - exp.component_experiment(0).circuits()[-1].draw("mpl") + exp.component_experiment(0).circuits()[-1].draw(output="mpl", style="iqp") .. 
jupyter-execute:: - exp.component_experiment(1).circuits()[-1].draw("mpl") + exp.component_experiment(1).circuits()[-1].draw(output="mpl", style="iqp") Run the experiment and print results: @@ -94,7 +94,7 @@ experiment: t2type="ramsey", osc_freq=1e5) - exp.component_experiment(1).circuits()[-1].draw("mpl") + exp.component_experiment(1).circuits()[-1].draw(output="mpl", style="iqp") Run and display results: diff --git a/docs/manuals/measurement/readout_mitigation.rst b/docs/manuals/measurement/readout_mitigation.rst index 2b69142d27..0fed52b920 100644 --- a/docs/manuals/measurement/readout_mitigation.rst +++ b/docs/manuals/measurement/readout_mitigation.rst @@ -180,4 +180,4 @@ See also * API documentation: :mod:`~qiskit_experiments.library.characterization.LocalReadoutError`, :mod:`~qiskit_experiments.library.characterization.CorrelatedReadoutError` -* Qiskit Textbook: `Measurement Error Mitigation `__ +* Qiskit Textbook: `Measurement Error Mitigation `__ diff --git a/docs/manuals/verification/quantum_volume.rst b/docs/manuals/verification/quantum_volume.rst index a4ea74e771..2b01b515fa 100644 --- a/docs/manuals/verification/quantum_volume.rst +++ b/docs/manuals/verification/quantum_volume.rst @@ -8,7 +8,7 @@ that the computer successfully implements. Quantum computing systems with high-fidelity operations, high connectivity, large calibrated gate sets, and circuit rewriting toolchains are expected to have higher quantum volumes. See the `Qiskit -Textbook `__ +Textbook `__ for an explanation on the QV method, which is described in Refs. [1]_ [2]_. The Quantum Volume is determined by the largest successful circuit depth @@ -20,8 +20,8 @@ a random permutation on the :math:`d` qubit. Then these circuits run on the quantum backend and on an ideal simulator (either :class:`qiskit_aer.AerSimulator` or :class:`qiskit.quantum_info.Statevector`). -A depth :math:`d` QV circuit is successful if it has ‘mean heavy-output -probability’ > 2/3 with confidence level > 0.977 (corresponding to +A depth :math:`d` QV circuit is successful if it has `mean heavy-output +probability` > 2/3 with confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran. .. note:: @@ -68,7 +68,7 @@ more trials may reduce the error bars to allow passing the threshold. The analysis results of the QV Experiment are: -- The mean heavy output probabilities (HOP) and standard deviation +- The mean heavy-output probabilities (HOP) and standard deviation - The calculated quantum volume, which will be None if the experiment does not pass the threshold @@ -190,5 +190,5 @@ See also -------- * API documentation: :mod:`~qiskit_experiments.library.quantum_volume` -* Qiskit Textbook: `Measuring Quantum Volume `__ +* Qiskit Textbook: `Measuring Quantum Volume `__ diff --git a/docs/manuals/verification/randomized_benchmarking.rst b/docs/manuals/verification/randomized_benchmarking.rst index 16fbd07f20..111e3f5ddc 100644 --- a/docs/manuals/verification/randomized_benchmarking.rst +++ b/docs/manuals/verification/randomized_benchmarking.rst @@ -8,7 +8,7 @@ identity. After running the circuits, the number of shots resulting in an error output different from the ground state) are counted, and from this data one can infer error estimates for the quantum device, by calculating the Error Per Clifford. See the `Qiskit Textbook -`__ for an +`__ for an explanation on the RB method, which is based on Refs. [1]_ [2]_. .. 
jupyter-execute:: @@ -215,20 +215,20 @@ The default RB circuit output shows Clifford blocks: # Run an RB experiment on qubit 0 exp = StandardRB(physical_qubits=(0,), lengths=[2], num_samples=1, seed=seed) c = exp.circuits()[0] - c.draw("mpl") + c.draw(output="mpl", style="iqp") You can decompose the circuit into underlying gates: .. jupyter-execute:: - c.decompose().draw("mpl") + c.decompose().draw(output="mpl", style="iqp") And see the transpiled circuit using the basis gate set of the backend: .. jupyter-execute:: from qiskit import transpile - transpile(c, backend, **vars(exp.transpile_options)).draw("mpl", idle_wires=False) + transpile(c, backend, **vars(exp.transpile_options)).draw(output="mpl", style="iqp", idle_wires=False) .. note:: In 0.5.0, the default value of ``optimization_level`` in ``transpile_options`` changed @@ -309,4 +309,4 @@ See also -------- * API documentation: :mod:`~qiskit_experiments.library.randomized_benchmarking` -* Qiskit Textbook: `Randomized Benchmarking `__ +* Qiskit Textbook: `Randomized Benchmarking `__ diff --git a/docs/tutorials/calibrations.rst b/docs/tutorials/calibrations.rst index b1557a0c0e..d3d5967850 100644 --- a/docs/tutorials/calibrations.rst +++ b/docs/tutorials/calibrations.rst @@ -12,9 +12,8 @@ measurement data manually. In this tutorial, we demonstrate how to calibrate single-qubit gates using the calibration framework in Qiskit Experiments. We will run experiments on our test pulse backend, :class:`.SingleTransmonTestBackend`, a backend that simulates the underlying -pulses with `Qiskit Dynamics `_ on a -three-level model of a transmon. You can also run these experiments on any real backend -with Pulse enabled (see +pulses with :mod:`qiskit_dynamics` on a three-level model of a transmon. You can also +run these experiments on any real backend with Pulse enabled (see :external+qiskit:doc:`tutorials/circuits_advanced/08_gathering_system_information`). We will run experiments to @@ -34,10 +33,6 @@ This automatic updating can also be disabled using the ``auto_update`` flag. This tutorial requires the :mod:`qiskit_dynamics` package to run simulations. You can install it with ``python -m pip install qiskit-dynamics``. -.. note:: - This tutorial requires the ``pandas`` package to visualize calibration tables. - You can install it with ``python -m pip install pandas``. - .. jupyter-execute:: import pandas as pd @@ -176,7 +171,7 @@ Instantiate the experiment and draw the first circuit in the sweep: .. jupyter-execute:: circuit = spec.circuits()[0] - circuit.draw(output="mpl") + circuit.draw(output="mpl", style="iqp") We can also visualize the pulse schedule for the circuit: @@ -200,8 +195,8 @@ Run the calibration experiment: The instance of ``calibrations`` has been automatically updated with the measured frequency, as shown below. In addition to the columns shown below, ``calibrations`` also -store the group to which a value belongs, whether a values is valid or not and the -experiment id that produce a value. +stores the group to which a value belongs, whether a value is valid or not, and the +experiment id that produced a value. .. jupyter-execute:: @@ -229,7 +224,7 @@ with different amplitudes. .. jupyter-execute:: - rabi.circuits()[0].draw("mpl") + rabi.circuits()[0].draw(output="mpl", style="iqp") After the experiment completes the value of the amplitudes in the calibrations will automatically be updated. This behaviour can be controlled using the ``auto_update`` @@ -320,7 +315,7 @@ negative amplitude. 
    from qiskit_experiments.library import RoughDragCal
     cal_drag = RoughDragCal([qubit], cals, backend=backend, betas=np.linspace(-20, 20, 25))
     cal_drag.set_experiment_options(reps=[3, 5, 7])
-    cal_drag.circuits()[5].draw(output='mpl')
+    cal_drag.circuits()[5].draw(output="mpl", style="iqp")

.. jupyter-execute::
@@ -397,7 +392,7 @@ over/under rotations is the highest.

     overamp_exp = FineXAmplitude((qubit,), backend=backend)
     overamp_exp.set_transpile_options(inst_map=inst_map)
-    overamp_exp.circuits()[4].draw(output='mpl')
+    overamp_exp.circuits()[4].draw(output="mpl", style="iqp")

.. jupyter-execute::
@@ -464,7 +459,7 @@ error which we want to correct.

     from qiskit_experiments.library import FineSXAmplitudeCal

     amp_cal = FineSXAmplitudeCal((qubit,), cals, backend=backend, schedule_name="sx")
-    amp_cal.circuits()[4].draw(output="mpl")
+    amp_cal.circuits()[4].draw(output="mpl", style="iqp")

Let's run the calibration experiment:
@@ -495,7 +490,7 @@ See also
--------

* API documentation: :mod:`~qiskit_experiments.calibration_management` and :mod:`~qiskit_experiments.library.calibration`
-* Qiskit Textbook: `Calibrating Qubits with Qiskit Pulse `__
+* Qiskit Textbook: `Calibrating Qubits with Qiskit Pulse `__
diff --git a/docs/tutorials/curve_analysis.rst b/docs/tutorials/curve_analysis.rst
index 0f9abe6295..0bcabcb43d 100644
--- a/docs/tutorials/curve_analysis.rst
+++ b/docs/tutorials/curve_analysis.rst
@@ -266,6 +266,11 @@ every logic defined in ``AnalysisA``.
Curve Analysis workflow
-----------------------

+.. warning::
+
+    The :class:`CurveData` dataclass has been replaced by the :class:`.ScatterTable` dataframe,
+    and will be deprecated and removed in a future release.
+
Typically curve analysis performs fitting as follows. This workflow is defined in
the method :meth:`CurveAnalysis._run_analysis`.
diff --git a/docs/tutorials/custom_experiment.rst b/docs/tutorials/custom_experiment.rst
index 1173cba12a..c1ccf36546 100644
--- a/docs/tutorials/custom_experiment.rst
+++ b/docs/tutorials/custom_experiment.rst
@@ -573,7 +573,7 @@ Let's use a GHZ circuit as the input:

     for i in range(1, nq):
         qc.cx(i-1, i)

-    qc.draw("mpl")
+    qc.draw(output="mpl", style="iqp")

Check that the experiment is appending a random Pauli and measurements as expected:
@@ -586,7 +586,7 @@ Check that the experiment is appending a random Pauli and measurements as expect

     # Run ideal randomized meas experiment
     exp = RandomizedMeasurement(qc, num_samples=num_samples)
-    exp.circuits()[0].draw("mpl")
+    exp.circuits()[0].draw(output="mpl", style="iqp")

We now run the experiment with a GHZ circuit on an ideal backend, which produces
nearly perfect symmetrical results between :math:`|0000\rangle` and :math:`|1111\rangle`:
@@ -640,4 +640,4 @@ unaffected by the added randomized measurements, which use its own classical reg
         qc.cx(i-1, i)

     exp = RandomizedMeasurement(qc, num_samples=num_samples)
-    exp.circuits()[0].draw("mpl")
\ No newline at end of file
+    exp.circuits()[0].draw(output="mpl", style="iqp")
\ No newline at end of file
diff --git a/docs/tutorials/getting_started.rst b/docs/tutorials/getting_started.rst
index 4509de94a7..339b81d3b5 100644
--- a/docs/tutorials/getting_started.rst
+++ b/docs/tutorials/getting_started.rst
@@ -101,11 +101,11 @@ first and last circuits for our :math:`T_1` experiment:

.. jupyter-execute::

    print(delays)
-    exp.circuits()[0].draw(output='mpl')
+    exp.circuits()[0].draw(output="mpl", style="iqp")

..
jupyter-execute::

-    exp.circuits()[-1].draw(output='mpl')
+    exp.circuits()[-1].draw(output="mpl", style="iqp")

As expected, the delay block spans the full range of time values that we
specified.
@@ -238,6 +238,8 @@ supports can be set:

     exp.set_run_options(shots=1000,
                         meas_level=MeasLevel.CLASSIFIED)
+    print(f"Shots set to {exp.run_options.get('shots')}, "
+          f"measurement level set to {exp.run_options.get('meas_level')}")

Consult the documentation of the run method of your specific backend type for valid
options.
@@ -253,6 +255,7 @@ before execution:

     exp.set_transpile_options(scheduling_method='asap',
                               optimization_level=3,
                               basis_gates=["x", "sx", "rz"])
+    print(f"Transpile options are {exp.transpile_options}")

Consult the documentation of :func:`qiskit.compiler.transpile` for valid options.
@@ -267,6 +270,7 @@ upon experiment instantiation, but can also be explicitly set via

     exp = T1(physical_qubits=(0,), delays=delays)
     new_delays=np.arange(1e-6, 600e-6, 50e-6)
     exp.set_experiment_options(delays=new_delays)
+    print(f"Experiment options are {exp.experiment_options}")

Consult the :doc:`API documentation ` for the options of each experiment
class.
@@ -274,7 +278,7 @@ class.

Analysis options
----------------

-These options are unique to each analysis class. Unlike the other options, analyis
+These options are unique to each analysis class. Unlike the other options, analysis
options are not directly set via the experiment object but use instead a method of the
associated ``analysis``:

@@ -295,7 +299,7 @@ Running experiments on multiple qubits
======================================

To run experiments across many qubits of the same device, we use **composite
-experiments**. A composite experiment is a parent object that contains one or more child
+experiments**. A :class:`.CompositeExperiment` is a parent object that contains one or more child
experiments, which may themselves be composite. There are two core types of composite
experiments:

@@ -323,15 +327,25 @@ Note that when the transpile and run options are set for a composite
experiment, child experiments's options are also set to the same options
recursively. Let's examine how the parallel experiment is constructed by
visualizing child and parent circuits. The child experiments can be accessed via the
-:meth:`~.ParallelExperiment.component_experiment` method, which indexes from zero:
+:meth:`~.CompositeExperiment.component_experiment` method, which indexes from zero:

.. jupyter-execute::

-    parallel_exp.component_experiment(0).circuits()[0].draw(output='mpl')
+    parallel_exp.component_experiment(0).circuits()[0].draw(output="mpl", style="iqp")

.. jupyter-execute::

-    parallel_exp.component_experiment(1).circuits()[0].draw(output='mpl')
+    parallel_exp.component_experiment(1).circuits()[0].draw(output="mpl", style="iqp")
+
+Similarly, the child analyses can be accessed via :meth:`.CompositeAnalysis.component_analysis` or via
+the analysis of the child experiment class:
+
+.. jupyter-execute::
+
+    parallel_exp.component_experiment(0).analysis.set_options(plot=True)
+
+    # This should print out what we set because it's the same option
+    print(parallel_exp.analysis.component_analysis(0).options.get("plot"))

The circuits of all experiments assume they're acting on virtual qubits
starting from index 0. In the case of a parallel experiment, the child experiment
circuits are composed together and then reassigned virtual qubit indices:

..
jupyter-execute::

-    parallel_exp.circuits()[0].draw(output='mpl')
+    parallel_exp.circuits()[0].draw(output="mpl", style="iqp")

During experiment transpilation, a mapping is performed to place these circuits
on the physical layout. We can see its effects by looking at the transpiled
@@ -349,7 +363,7 @@ and the :class:`.StandardRB` experiment's gates are on physical qubits 3 and 1.

.. jupyter-execute::

-    parallel_exp._transpiled_circuits()[0].draw(output='mpl')
+    parallel_exp._transpiled_circuits()[0].draw(output="mpl", style="iqp")

:class:`.ParallelExperiment` and :class:`.BatchExperiment` classes can also be
nested arbitrarily to make complex composite experiments.
@@ -393,4 +407,17 @@ into one level:

    parallel_data = parallel_exp.run(backend, seed_simulator=101).block_for_results()

    for result in parallel_data.analysis_results():
-        print(result)
\ No newline at end of file
+        print(result)
+
+Broadcasting analysis options to child experiments
+--------------------------------------------------
+
+Use the ``broadcast`` parameter to set analysis options on each of the child experiments.
+
+.. jupyter-execute::
+
+    parallel_exp.analysis.set_options(plot=False, broadcast=True)
+
+If a child experiment inherits from :class:`.CompositeExperiment` (such as the :class:`.ParallelExperiment`
+and :class:`.BatchExperiment` classes), this process will continue to work recursively.
+In this instance, no figures will be generated for any child experiment when the analysis runs.
\ No newline at end of file
diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst
index 830792b0f8..cee8c15b7c 100644
--- a/docs/tutorials/index.rst
+++ b/docs/tutorials/index.rst
@@ -10,11 +10,14 @@ They're suitable for beginners who want to get started with the package.
The Basics
----------

+.. This toctree is hardcoded since Getting Started is already included in the sidebar for more visibility.
+
.. toctree::
-    :maxdepth: 2
+    :maxdepth: 1

    intro
-
+    getting_started
+
Exploring Modules
-----------------
diff --git a/qiskit_experiments/calibration_management/__init__.py b/qiskit_experiments/calibration_management/__init__.py
index 82be9ae50d..d04e8eb064 100644
--- a/qiskit_experiments/calibration_management/__init__.py
+++ b/qiskit_experiments/calibration_management/__init__.py
@@ -23,7 +23,7 @@ otherwise indicated.

Calibrating qubit setups is the task of finding the pulse shapes and parameter
-values that maximizes the fidelity of the resulting quantum operations. This
+values that maximize the fidelity of the resulting quantum operations. This
therefore requires experiments which are analyzed to extract parameter values.
Furthermore, the resulting parameter values and schedules must be managed. The
calibration management module in Qiskit experiments allows users to manage
diff --git a/qiskit_experiments/calibration_management/calibrations.py b/qiskit_experiments/calibration_management/calibrations.py
index 0ee98c707b..4058dcad7e 100644
--- a/qiskit_experiments/calibration_management/calibrations.py
+++ b/qiskit_experiments/calibration_management/calibrations.py
@@ -1270,12 +1270,12 @@ def schedules(self) -> List[Dict[str, Any]]:
        Returns:
            data: A list of dictionaries with all the schedules in it. The key-value pairs are

-                * 'qubits': the qubits to which this schedule applies. This may be an empty
-                  tuple () if the schedule is the default for all qubits.
-                * 'schedule': The schedule.
-                * 'parameters': The parameters in the schedule exposed for convenience.
+                * ``qubits``: the qubits to which this schedule applies.
This may be an empty + tuple () if the schedule is the default for all qubits. + * ``schedule``: The schedule. + * ``parameters``: The parameters in the schedule exposed for convenience. - This list of dictionaries can easily be converted to a data frame. + This list of dictionaries can easily be converted to a data frame. """ data = [] for key, sched in self._schedules.items(): diff --git a/qiskit_experiments/curve_analysis/__init__.py b/qiskit_experiments/curve_analysis/__init__.py index 2a3b77e7a0..2db6044fff 100644 --- a/qiskit_experiments/curve_analysis/__init__.py +++ b/qiskit_experiments/curve_analysis/__init__.py @@ -117,6 +117,7 @@ from .base_curve_analysis import BaseCurveAnalysis from .curve_analysis import CurveAnalysis from .composite_curve_analysis import CompositeCurveAnalysis +from .scatter_table import ScatterTable from .curve_data import ( CurveData, CurveFitResult, diff --git a/qiskit_experiments/curve_analysis/base_curve_analysis.py b/qiskit_experiments/curve_analysis/base_curve_analysis.py index 408aff89a4..51fd9d29b2 100644 --- a/qiskit_experiments/curve_analysis/base_curve_analysis.py +++ b/qiskit_experiments/curve_analysis/base_curve_analysis.py @@ -38,7 +38,8 @@ MplDrawer, ) -from .curve_data import CurveData, CurveFitResult, ParameterRepr +from .curve_data import CurveFitResult, ParameterRepr +from .scatter_table import ScatterTable PARAMS_ENTRY_PREFIX = "@Parameters_" DATA_ENTRY_PREFIX = "@Data_" @@ -99,11 +100,16 @@ class BaseCurveAnalysis(BaseAnalysis, ABC): .. rubric:: _create_curve_data - This method to creates analysis results for the formatted dataset, i.e. data used for the fitting. + This method creates analysis results for the formatted dataset, i.e. data used for the fitting. Entries are created when the analysis option ``return_data_points`` is ``True``. If analysis consists of multiple series, analysis result is created for each curve data in the series definitions. + .. rubric:: _create_figures + + This method creates figures by consuming the scatter table data. + Figures are created when the analysis option ``plot`` is ``True``. + .. rubric:: _initialize This method initializes analysis options against input experiment data. @@ -154,8 +160,8 @@ def _default_options(cls) -> Options: the analysis result. plot_raw_data (bool): Set ``True`` to draw processed data points, dataset without formatting, on canvas. This is ``False`` by default. - plot (bool): Set ``True`` to create figure for fit result. - This is ``True`` by default. + plot (bool): Set ``True`` to create figure for fit result or ``False`` to + not create a figure. This overrides the behavior of ``generate_figures``. return_fit_parameters (bool): Set ``True`` to return all fit model parameters with details of the fit outcome. Default to ``True``. return_data_points (bool): Set ``True`` to include in the analysis result @@ -207,7 +213,6 @@ def _default_options(cls) -> Options: options.plotter = CurvePlotter(MplDrawer()) options.plot_raw_data = False - options.plot = True options.return_fit_parameters = True options.return_data_points = False options.data_processor = None @@ -277,29 +282,21 @@ def set_options(self, **fields): def _run_data_processing( self, raw_data: List[Dict], - models: List[lmfit.Model], - ) -> CurveData: + ) -> ScatterTable: """Perform data processing from the experiment result payload. Args: raw_data: Payload in the experiment data. - models: A list of LMFIT models that provide the model name and - optionally data sorting keys. 
Returns: Processed data that will be sent to the formatter method. - - Raises: - DataProcessorError: When model is multi-objective function but - data sorting option is not provided. - DataProcessorError: When key for x values is not found in the metadata. """ @abstractmethod def _format_data( self, - curve_data: CurveData, - ) -> CurveData: + curve_data: ScatterTable, + ) -> ScatterTable: """Postprocessing for the processed dataset. Args: @@ -312,15 +309,12 @@ def _format_data( @abstractmethod def _run_curve_fit( self, - curve_data: CurveData, - models: List[lmfit.Model], + curve_data: ScatterTable, ) -> CurveFitResult: """Perform curve fitting on given data collection and fit models. Args: curve_data: Formatted data to fit. - models: A list of LMFIT models that are used to build a cost function - for the LMFIT minimizer. Returns: The best fitting outcome with minimum reduced chi-squared value. @@ -338,7 +332,7 @@ def _evaluate_quality( Returns: String that represents fit result quality. Usually "good" or "bad". """ - if fit_data.reduced_chisq < 3.0: + if 0 < fit_data.reduced_chisq < 3.0: return "good" return "bad" @@ -387,35 +381,32 @@ def _create_analysis_results( return outcomes + # pylint: disable=unused-argument def _create_curve_data( self, - curve_data: CurveData, - models: List[lmfit.Model], + curve_data: ScatterTable, **metadata, ) -> List[AnalysisResultData]: """Create analysis results for raw curve data. Args: curve_data: Formatted data that is used for the fitting. - models: A list of LMFIT models that provides model names - to extract subsets of experiment data. Returns: List of analysis result data. """ samples = [] - for model in models: - sub_data = curve_data.get_subset_of(model._name) + for model_name, sub_data in list(curve_data.groupby("model_name")): raw_datum = AnalysisResultData( name=DATA_ENTRY_PREFIX + self.__class__.__name__, value={ - "xdata": sub_data.x, - "ydata": sub_data.y, - "sigma": sub_data.y_err, + "xdata": sub_data.xval.to_numpy(), + "ydata": sub_data.yval.to_numpy(), + "sigma": sub_data.yerr.to_numpy(), }, extra={ - "name": model._name, + "name": model_name, **metadata, }, ) @@ -423,6 +414,20 @@ def _create_curve_data( return samples + def _create_figures( + self, + curve_data: ScatterTable, + ) -> List["matplotlib.figure.Figure"]: + """Create a list of figures from the curve data. + + Args: + curve_data: Scatter data table containing all data points. + + Returns: + A list of figures. + """ + return [] + def _initialize( self, experiment_data: ExperimentData, diff --git a/qiskit_experiments/curve_analysis/composite_curve_analysis.py b/qiskit_experiments/curve_analysis/composite_curve_analysis.py index 093bc5f791..6232eda210 100644 --- a/qiskit_experiments/curve_analysis/composite_curve_analysis.py +++ b/qiskit_experiments/curve_analysis/composite_curve_analysis.py @@ -19,6 +19,7 @@ import lmfit import numpy as np +import pandas as pd from uncertainties import unumpy as unp from qiskit.utils.deprecation import deprecate_func @@ -39,6 +40,7 @@ from .base_curve_analysis import PARAMS_ENTRY_PREFIX, BaseCurveAnalysis from .curve_data import CurveFitResult +from .scatter_table import ScatterTable from .utils import eval_with_uncertainties @@ -101,6 +103,11 @@ class CompositeCurveAnalysis(BaseAnalysis): This method is passed all the group fit outcomes and can return a list of new values to be stored in the analysis results. + .. rubric:: _create_figures + + This method creates figures by consuming the scatter table data. 
+ Figures are created when the analysis option ``plot`` is ``True``. + """ def __init__( @@ -210,6 +217,52 @@ def _create_analysis_results( """ return [] + def _create_figures( + self, + curve_data: ScatterTable, + ) -> List["matplotlib.figure.Figure"]: + """Create a list of figures from the curve data. + + Args: + curve_data: Scatter data table containing all data points. + + Returns: + A list of figures. + """ + for analysis in self.analyses(): + sub_data = curve_data[curve_data.model_name.str.endswith(f"_{analysis.name}")] + for model_id, data in list(sub_data.groupby("model_id")): + model_name = analysis._models[model_id]._name + # Plot raw data scatters + if analysis.options.plot_raw_data: + raw_data = data.filter(like="processed", axis="index") + self.plotter.set_series_data( + series_name=model_name, + x=raw_data.xval.to_numpy(), + y=raw_data.yval.to_numpy(), + ) + # Plot formatted data scatters + formatted_data = data.filter(like="formatted", axis="index") + self.plotter.set_series_data( + series_name=model_name, + x_formatted=formatted_data.xval.to_numpy(), + y_formatted=formatted_data.yval.to_numpy(), + y_formatted_err=formatted_data.yerr.to_numpy(), + ) + # Plot fit lines + line_data = data.filter(like="fitted", axis="index") + if len(line_data) == 0: + continue + fit_stdev = line_data.yerr.to_numpy() + self.plotter.set_series_data( + series_name=model_name, + x_interp=line_data.xval.to_numpy(), + y_interp=line_data.yval.to_numpy(), + y_interp_err=fit_stdev if np.isfinite(fit_stdev).all() else None, + ) + + return [self.plotter.figure()] + @classmethod def _default_options(cls) -> Options: """Default analysis options. @@ -279,55 +332,45 @@ def _run_analysis( experiment_data: ExperimentData, ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]: + # Flag for plotting can be "always", "never", or "selective" + # the analysis option overrides self._generate_figures if set + if self.options.get("plot", None): + plot = "always" + elif self.options.get("plot", None) is False: + plot = "never" + else: + plot = getattr(self, "_generate_figures", "always") + analysis_results = [] + figures = [] fit_dataset = {} - red_chi = {} + curve_data_set = [] for analysis in self._analyses: analysis._initialize(experiment_data) + analysis.set_options(plot=False) metadata = analysis.options.extra.copy() metadata["group"] = analysis.name - processed_data = analysis._run_data_processing( - raw_data=experiment_data.data(), - models=analysis.models, - ) - - if self.options.plot and analysis.options.plot_raw_data: - for model in analysis.models: - sub_data = processed_data.get_subset_of(model._name) - self.plotter.set_series_data( - model._name + f"_{analysis.name}", - x=sub_data.x, - y=sub_data.y, - ) - - # Format data - formatted_data = analysis._format_data(processed_data) - if self.options.plot: - for model in analysis.models: - sub_data = formatted_data.get_subset_of(model._name) - self.plotter.set_series_data( - model._name + f"_{analysis.name}", - x_formatted=sub_data.x, - y_formatted=sub_data.y, - y_formatted_err=sub_data.y_err, - ) - - # Run fitting - fit_data = analysis._run_curve_fit( - curve_data=formatted_data, - models=analysis.models, + curve_data = analysis._format_data( + analysis._run_data_processing(experiment_data.data()) ) + fit_data = analysis._run_curve_fit(curve_data.filter(like="formatted", axis="index")) + fit_dataset[analysis.name] = fit_data if fit_data.success: quality = analysis._evaluate_quality(fit_data) - red_chi[analysis.name] = fit_data.reduced_chisq 
            else:
                quality = "bad"

+            # After the quality is determined, plot can become a boolean flag for whether
+            # to generate the figure
+            plot_bool = plot == "always" or (plot == "selective" and quality == "bad")
+
             if self.options.return_fit_parameters:
+                # Store fit status overview entry regardless of success.
+                # This is sometimes useful when debugging the fitting code.
                 overview = AnalysisResultData(
                     name=PARAMS_ENTRY_PREFIX + analysis.name,
                     value=fit_data,
@@ -337,65 +380,73 @@ def _run_analysis(
                 analysis_results.append(overview)

             if fit_data.success:
-                # Add extra analysis results
+                # Add fit data to curve data table
+                fit_curves = []
+                formatted = curve_data.filter(like="formatted", axis="index")
+                columns = list(curve_data.columns)
+                for i, sub_data in list(formatted.groupby("model_id")):
+                    name = analysis._models[i]._name
+                    xval = sub_data.xval.to_numpy()
+                    if len(xval) == 0:
+                        # If data is empty, skip drawing this model.
+                        # This is the case when a fit model exists but no data to fit is provided.
+                        continue
+                    # Compute X, Y values with fit parameters.
+                    xval_fit = np.linspace(np.min(xval), np.max(xval), num=100)
+                    yval_fit = eval_with_uncertainties(
+                        x=xval_fit,
+                        model=analysis.models[i],
+                        params=fit_data.ufloat_params,
+                    )
+                    model_fit = np.full((100, len(columns)), np.nan, dtype=object)
+                    fit_curves.append(model_fit)
+                    model_fit[:, columns.index("xval")] = xval_fit
+                    model_fit[:, columns.index("yval")] = unp.nominal_values(yval_fit)
+                    if fit_data.covar is not None:
+                        model_fit[:, columns.index("yerr")] = unp.std_devs(yval_fit)
+                    model_fit[:, columns.index("model_name")] = name
+                    model_fit[:, columns.index("model_id")] = i
+                curve_data = curve_data.append_list_values(
+                    other=np.vstack(fit_curves),
+                    prefix="fitted",
+                )
                 analysis_results.extend(
                     analysis._create_analysis_results(
-                        fit_data=fit_data, quality=quality, **metadata.copy()
+                        fit_data=fit_data,
+                        quality=quality,
+                        **metadata.copy(),
                     )
                 )

-            # Draw fit result
-            if self.options.plot:
-                x_interp = np.linspace(
-                    np.min(formatted_data.x), np.max(formatted_data.x), num=100
-                )
-                for model in analysis.models:
-                    y_data_with_uncertainty = eval_with_uncertainties(
-                        x=x_interp,
-                        model=model,
-                        params=fit_data.ufloat_params,
-                    )
-                    y_interp = unp.nominal_values(y_data_with_uncertainty)
-                    # Add fit line data
-                    self.plotter.set_series_data(
-                        model._name + f"_{analysis.name}",
-                        x_interp=x_interp,
-                        y_interp=y_interp,
-                    )
-                    if fit_data.covar is not None:
-                        # Add confidence interval data
-                        y_interp_err = unp.std_devs(y_data_with_uncertainty)
-                        if np.isfinite(y_interp_err).all():
-                            self.plotter.set_series_data(
-                                model._name + f"_{analysis.name}",
-                                y_interp_err=y_interp_err,
-                            )
-
-            # Add raw data points
             if self.options.return_data_points:
+                # Add raw data points
                 analysis_results.extend(
                     analysis._create_curve_data(
-                        curve_data=formatted_data,
-                        models=analysis.models,
+                        curve_data=curve_data.filter(like="formatted", axis="index"),
                         **metadata,
                     )
                 )

-            fit_dataset[analysis.name] = fit_data
+            curve_data.model_name += f"_{analysis.name}"
+            curve_data_set.append(curve_data)

+        combined_curve_data = pd.concat(curve_data_set)
         total_quality = self._evaluate_quality(fit_dataset)

-        if red_chi:
-            self.plotter.set_supplementary_data(fit_red_chi=red_chi)

         # Create analysis results by combining all fit data
         if all(fit_data.success for fit_data in fit_dataset.values()):
-            primary_results = self._create_analysis_results(
+            composite_results = self._create_analysis_results(
                 fit_data=fit_dataset, quality=total_quality, **self.options.extra.copy()
             )
-
analysis_results.extend(primary_results) - self.plotter.set_supplementary_data(primary_results=primary_results) + analysis_results.extend(composite_results) + else: + composite_results = [] - if self.options.plot: - return analysis_results, [self.plotter.figure()] + if plot_bool: + self.plotter.set_supplementary_data( + fit_red_chi={k: v.reduced_chisq for k, v in fit_dataset.items() if v.success}, + primary_results=composite_results, + ) + figures.extend(self._create_figures(curve_data=combined_curve_data)) - return analysis_results, [] + return analysis_results, figures diff --git a/qiskit_experiments/curve_analysis/curve_analysis.py b/qiskit_experiments/curve_analysis/curve_analysis.py index 383931f1ab..7fec75b0b4 100644 --- a/qiskit_experiments/curve_analysis/curve_analysis.py +++ b/qiskit_experiments/curve_analysis/curve_analysis.py @@ -16,6 +16,9 @@ # pylint: disable=invalid-name from typing import Dict, List, Tuple, Union, Optional +from functools import partial +from itertools import groupby +from operator import itemgetter import lmfit import numpy as np @@ -25,8 +28,15 @@ from qiskit_experiments.data_processing.exceptions import DataProcessorError from .base_curve_analysis import BaseCurveAnalysis, PARAMS_ENTRY_PREFIX -from .curve_data import CurveData, FitOptions, CurveFitResult -from .utils import eval_with_uncertainties, convert_lmfit_result, multi_mean_xy_data, data_sort +from .curve_data import FitOptions, CurveFitResult +from .scatter_table import ScatterTable +from .utils import ( + eval_with_uncertainties, + convert_lmfit_result, + shot_weighted_average, + inverse_weighted_variance, + sample_average, +) class CurveAnalysis(BaseCurveAnalysis): @@ -84,6 +94,11 @@ class CurveAnalysis(BaseCurveAnalysis): If analysis consists of multiple series, an analysis result is created for each series definition. + .. rubric:: _create_figures + + This method creates figures by consuming the scatter table data. + Figures are created when the analysis option ``plot`` is ``True``. + .. rubric:: _initialize This method initializes analysis options against input experiment data. @@ -135,122 +150,168 @@ def models(self) -> List[lmfit.Model]: def _run_data_processing( self, raw_data: List[Dict], - models: List[lmfit.Model], - ) -> CurveData: + ) -> ScatterTable: """Perform data processing from the experiment result payload. Args: raw_data: Payload in the experiment data. - models: A list of LMFIT models that provide the model name and - optionally data sorting keys. Returns: Processed data that will be sent to the formatter method. Raises: - DataProcessorError: When model is a multi-objective function but - data sorting option is not provided. DataProcessorError: When key for x values is not found in the metadata. + DataProcessorError: When data_subfit_map information for a fit model is missing. + ValueError: When input data has incomplete metadata to specify fit model. """ + opt = self.options - def _matched(metadata, **filters): - try: - return all(metadata[key] == val for key, val in filters.items()) - except KeyError: - return False - - if not self.options.filter_data: - analyzed_data = raw_data + # Create table + if opt.filter_data: + to_process = [d for d in raw_data if opt.filter_data.items() <= d["metadata"].items()] else: - analyzed_data = [ - d for d in raw_data if _matched(d["metadata"], **self.options.filter_data) + to_process = raw_data + + # This must align with ScatterTable columns. Use struct array. 
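+        # A NumPy structured array keeps every per-point field typed and contiguous,
+        # so the finished array can be handed to the dataframe constructor in one pass.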
+ dtypes = np.dtype( + [ + ("xval", float), + ("yval", float), + ("yerr", float), + ("model_name", "U30"), # builtin str is U0 which is zero-length unicode in numpy + ("model_id", int), + ("shots", int), ] - - x_key = self.options.x_key - - try: - xdata = np.asarray([datum["metadata"][x_key] for datum in analyzed_data], dtype=float) - except KeyError as ex: - raise DataProcessorError( - f"X value key {x_key} is not defined in circuit metadata." - ) from ex - - ydata = self.options.data_processor(analyzed_data) - shots = np.asarray([datum.get("shots", np.nan) for datum in analyzed_data]) - - if len(models) == 1: - # all data belongs to the single model - data_allocation = np.full(xdata.size, 0, dtype=int) - else: - data_allocation = np.full(xdata.size, -1, dtype=int) - for idx, sub_model in enumerate(models): - try: - tags = self.options.data_subfit_map[sub_model._name] - except KeyError as ex: - raise DataProcessorError( - f"Data sort options for model {sub_model._name} is not defined. " - "Please provide the 'data_subfit_map' analysis option for this model." - ) from ex - if tags is None: - continue - matched_inds = np.asarray( - [_matched(d["metadata"], **tags) for d in analyzed_data], dtype=bool + ) + table_data = np.empty(len(to_process), dtype=dtypes) + for idx, datum in enumerate(to_process): + metadata = datum["metadata"].copy() + # Get xval from metadata + try: + xval = metadata.pop(opt.x_key) + except KeyError as ex: + raise DataProcessorError( + f"X value key {opt.x_key} is not defined in the circuit metadata." + ) from ex + # Classify fit model + if len(self._models) == 1: + m_id = 0 + m_name = self._models[0]._name + else: + for i, model in enumerate(self._models): + try: + model_spec = self.options.data_subfit_map[model._name] + except KeyError as ex: + raise DataProcessorError( + f"Mapping to data for the fit model {model._name} is not provided." + ) from ex + if model_spec.items() <= metadata.items(): + m_id = i + m_name = model._name + break + else: + raise ValueError(f"Experiment data {datum} doesn't belong to any fit model.") + table_data[idx]["xval"] = xval + table_data[idx]["shots"] = datum.get("shots", -1) + table_data[idx]["model_id"] = m_id + table_data[idx]["model_name"] = m_name + + # Add extra metadata + add_key = metadata.keys() - table_data.dtype.fields + if add_key: + # Add missing keys to struct array + # This code is lengthy but faster than merge_arrays function + add_dtypes = [] + for k in add_key: + if isinstance(metadata[k], str): + new_dtype = "U30" + else: + new_dtype = type(metadata[k]) + add_dtypes.append((k, new_dtype)) + new_table_data = np.empty( + len(to_process), dtype=sum((table_data.dtype.descr, add_dtypes), []) ) - data_allocation[matched_inds] = idx - - return CurveData( - x=xdata, - y=unp.nominal_values(ydata), - y_err=unp.std_devs(ydata), - shots=shots, - data_allocation=data_allocation, - labels=[sub_model._name for sub_model in models], + for k in table_data.dtype.fields: + new_table_data[k] = table_data[k] + table_data = new_table_data + for k, v in metadata.items(): + table_data[idx][k] = v + + # Compute y value + if not self.options.data_processor: + raise ValueError( + f"Data processor is not set for the {self.__class__.__name__} instance. " + "Initialize the instance with the experiment data, or set the " + "data_processor analysis options." 
+ ) + processed_values = self.options.data_processor(to_process) + table_data["yval"] = unp.nominal_values(processed_values).flatten() + table_data["yerr"] = unp.std_devs(processed_values).flatten() + + out = ScatterTable( + data=table_data, + index=[f"processed-{i:04d}" for i in range(len(to_process))], ) + return out def _format_data( self, - curve_data: CurveData, - ) -> CurveData: + curve_data: ScatterTable, + ) -> ScatterTable: """Postprocessing for the processed dataset. Args: curve_data: Processed dataset created from experiment results. Returns: - Formatted data. + New scatter table instance including raw and formatted data. """ - # take average over the same x value by keeping sigma - data_allocation, xdata, ydata, sigma, shots = multi_mean_xy_data( - series=curve_data.data_allocation, - xdata=curve_data.x, - ydata=curve_data.y, - sigma=curve_data.y_err, - shots=curve_data.shots, - method=self.options.average_method, - ) - - # sort by x value in ascending order - data_allocation, xdata, ydata, sigma, shots = data_sort( - series=data_allocation, - xdata=xdata, - ydata=ydata, - sigma=sigma, - shots=shots, + averaging_methods = { + "shots_weighted": shot_weighted_average, + "iwv": inverse_weighted_variance, + "sample": sample_average, + } + + columns = list(curve_data.columns) + sort_by = itemgetter( + columns.index("model_id"), + columns.index("xval"), ) - - return CurveData( - x=xdata, - y=ydata, - y_err=sigma, - shots=shots, - data_allocation=data_allocation, - labels=curve_data.labels, + # Use python native groupby method on ndarray. This is more performant than pandas one. + average = averaging_methods[self.options.average_method] + formatted = [] + for (mid, xv), g in groupby(sorted(curve_data.values, key=sort_by), key=sort_by): + g_values = np.array(list(g)) + g_dict = dict(zip(columns, g_values.T)) + avg_yval, avg_yerr, shots = average(g_dict["yval"], g_dict["yerr"], g_dict["shots"]) + averaged = dict.fromkeys(columns) + averaged["xval"] = xv + averaged["yval"] = avg_yval + averaged["yerr"] = avg_yerr + averaged["model_id"] = mid + averaged["shots"] = shots + for k, v in g_dict.items(): + if averaged[k] is not None: + continue + if len(g_values) == 1: + averaged[k] = v[0] + else: + unique = set(v) + if len(unique) == 1: + averaged[k] = next(iter(unique)) + else: + averaged[k] = list(unique) + formatted.append(list(averaged.values())) + + return curve_data.append_list_values( + other=formatted, + prefix="formatted", ) def _generate_fit_guesses( self, user_opt: FitOptions, - curve_data: CurveData, # pylint: disable=unused-argument + curve_data: ScatterTable, # pylint: disable=unused-argument ) -> Union[FitOptions, List[FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -265,21 +326,18 @@ def _generate_fit_guesses( def _run_curve_fit( self, - curve_data: CurveData, - models: List[lmfit.Model], + curve_data: ScatterTable, ) -> CurveFitResult: """Perform curve fitting on given data collection and fit models. Args: curve_data: Formatted data to fit. - models: A list of LMFIT models that are used to build a cost function - for the LMFIT minimizer. Returns: The best fitting outcome with minimum reduced chi-squared value. """ unite_parameter_names = [] - for model in models: + for model in self._models: # Seems like this is not efficient looping, but using set operation sometimes # yields bad fit. 
Not sure if this is an edge case, but # `TestRamseyXY` unittest failed due to the significant chisq value @@ -314,26 +372,32 @@ def _run_curve_fit( if isinstance(fit_options, FitOptions): fit_options = [fit_options] - valid_uncertainty = np.all(np.isfinite(curve_data.y_err)) - - # Objective function for minimize. This computes composite residuals of sub models. - def _objective(_params): - ys = [] - for model in models: - sub_data = curve_data.get_subset_of(model._name) - with np.errstate(divide="ignore"): - # Ignore numpy runtime warning. - # Zero y_err point introduces infinite weight, - # but this should be managed by LMFIT. - weights = 1.0 / sub_data.y_err if valid_uncertainty else None - yi = model._residual( - params=_params, - data=sub_data.y, - weights=weights, - x=sub_data.x, + # Create convenient function to compute residual of the models. + partial_residuals = [] + valid_uncertainty = np.all(np.isfinite(curve_data.yerr.to_numpy())) + for i, sub_data in list(curve_data.groupby("model_id")): + if valid_uncertainty: + nonzero_yerr = np.where( + np.isclose(sub_data.yerr, 0.0), + np.finfo(float).eps, + sub_data.yerr, ) - ys.append(yi) - return np.concatenate(ys) + raw_weights = 1 / nonzero_yerr + # Remove outlier. When all sample values are the same with sample average, + # or sampling error is zero with shot-weighted average, + # some yerr values might be very close to zero, yielding significant weights. + # With such outlier, the fit doesn't sense residual of other data points. + maximum_weight = np.percentile(raw_weights, 90) + weights = np.clip(raw_weights, 0.0, maximum_weight) + else: + weights = None + model_residual = partial( + self._models[i]._residual, + data=sub_data.yval.to_numpy(), + weights=weights, + x=sub_data.xval.to_numpy(), + ) + partial_residuals.append(model_residual) # Run fit for each configuration res = None @@ -351,14 +415,15 @@ def _objective(_params): ) try: - new = lmfit.minimize( - fcn=_objective, - params=guess_params, - method=self.options.fit_method, - scale_covar=not valid_uncertainty, - nan_policy="omit", - **fit_option.fitter_opts, - ) + with np.errstate(all="ignore"): + new = lmfit.minimize( + fcn=lambda x: np.concatenate([p(x) for p in partial_residuals]), + params=guess_params, + method=self.options.fit_method, + scale_covar=not valid_uncertainty, + nan_policy="omit", + **fit_option.fitter_opts, + ) except Exception: # pylint: disable=broad-except continue @@ -369,56 +434,92 @@ def _objective(_params): if new.success and res.redchi > new.redchi: res = new - return convert_lmfit_result(res, models, curve_data.x, curve_data.y) - - def _run_analysis( - self, experiment_data: ExperimentData - ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]: - - # Prepare for fitting - self._initialize(experiment_data) + return convert_lmfit_result( + res, + self._models, + curve_data.xval.to_numpy(), + curve_data.yval.to_numpy(), + ) - analysis_results = [] + def _create_figures( + self, + curve_data: ScatterTable, + ) -> List["matplotlib.figure.Figure"]: + """Create a list of figures from the curve data. - # Run data processing - processed_data = self._run_data_processing( - raw_data=experiment_data.data(), - models=self._models, - ) + Args: + curve_data: Scatter data table containing all data points. - if self.options.plot and self.options.plot_raw_data: - for model in self._models: - sub_data = processed_data.get_subset_of(model._name) + Returns: + A list of figures. 
+        """
+        for model_id, data in list(curve_data.groupby("model_id")):
+            model_name = self._models[model_id]._name
+            # Plot raw data scatters
+            if self.options.plot_raw_data:
+                raw_data = data.filter(like="processed", axis="index")
                 self.plotter.set_series_data(
-                    model._name,
-                    x=sub_data.x,
-                    y=sub_data.y,
+                    series_name=model_name,
+                    x=raw_data.xval.to_numpy(),
+                    y=raw_data.yval.to_numpy(),
                 )
-
-        # Format data
-        formatted_data = self._format_data(processed_data)
-        if self.options.plot:
-            for model in self._models:
-                sub_data = formatted_data.get_subset_of(model._name)
+            # Plot formatted data scatters
+            formatted_data = data.filter(like="formatted", axis="index")
+            self.plotter.set_series_data(
+                series_name=model_name,
+                x_formatted=formatted_data.xval.to_numpy(),
+                y_formatted=formatted_data.yval.to_numpy(),
+                y_formatted_err=formatted_data.yerr.to_numpy(),
+            )
+            # Plot fit lines
+            line_data = data.filter(like="fitted", axis="index")
+            if len(line_data) == 0:
+                continue
+            self.plotter.set_series_data(
+                series_name=model_name,
+                x_interp=line_data.xval.to_numpy(),
+                y_interp=line_data.yval.to_numpy(),
+            )
+            fit_stdev = line_data.yerr.to_numpy()
+            if np.isfinite(fit_stdev).all():
                 self.plotter.set_series_data(
-                    model._name,
-                    x_formatted=sub_data.x,
-                    y_formatted=sub_data.y,
-                    y_formatted_err=sub_data.y_err,
+                    series_name=model_name,
+                    y_interp_err=fit_stdev,
                 )

-        # Run fitting
-        fit_data = self._run_curve_fit(
-            curve_data=formatted_data,
-            models=self._models,
-        )
+        return [self.plotter.figure()]
+
+    def _run_analysis(
+        self,
+        experiment_data: ExperimentData,
+    ) -> Tuple[List[AnalysisResultData], List["pyplot.Figure"]]:
+        analysis_results = []
+        figures = []
+
+        # Flag for plotting can be "always", "never", or "selective"
+        # the analysis option overrides self._generate_figures if set
+        if self.options.get("plot", None):
+            plot = "always"
+        elif self.options.get("plot", None) is False:
+            plot = "never"
+        else:
+            plot = getattr(self, "_generate_figures", "always")
+
+        # Prepare for fitting
+        self._initialize(experiment_data)
+
+        curve_data = self._format_data(self._run_data_processing(experiment_data.data()))
+        fit_data = self._run_curve_fit(curve_data.filter(like="formatted", axis="index"))

         if fit_data.success:
             quality = self._evaluate_quality(fit_data)
-            self.plotter.set_supplementary_data(fit_red_chi=fit_data.reduced_chisq)
         else:
             quality = "bad"

+        # After the quality is determined, plot can become a boolean flag for whether
+        # to generate the figure
+        plot_bool = plot == "always" or (plot == "selective" and quality == "bad")
+
         if self.options.return_fit_parameters:
             # Store fit status overview entry regardless of success.
             # This is sometimes useful when debugging the fitting code.
@@ -430,58 +531,62 @@ def _run_analysis(
             )
             analysis_results.append(overview)

-        # Create figure and result data
         if fit_data.success:
-            # Create analysis results
-            primary_results = self._create_analysis_results(
-                fit_data=fit_data, quality=quality, **self.options.extra.copy()
+            # Add fit data to curve data table
+            fit_curves = []
+            formatted = curve_data.filter(like="formatted", axis="index")
+            columns = list(curve_data.columns)
+            for i, sub_data in list(formatted.groupby("model_id")):
+                name = self._models[i]._name
+                xval = sub_data.xval.to_numpy()
+                if len(xval) == 0:
+                    # If data is empty, skip drawing this model.
+                    # This is the case when a fit model exists but no data to fit is provided.
+                    continue
+                # Compute X, Y values with fit parameters.
+ xval_fit = np.linspace(np.min(xval), np.max(xval), num=100, dtype=float) + yval_fit = eval_with_uncertainties( + x=xval_fit, + model=self._models[i], + params=fit_data.ufloat_params, + ) + model_fit = np.full((100, len(columns)), None, dtype=object) + fit_curves.append(model_fit) + model_fit[:, columns.index("xval")] = xval_fit + model_fit[:, columns.index("yval")] = unp.nominal_values(yval_fit) + if fit_data.covar is not None: + model_fit[:, columns.index("yerr")] = unp.std_devs(yval_fit) + model_fit[:, columns.index("model_name")] = name + model_fit[:, columns.index("model_id")] = i + curve_data = curve_data.append_list_values( + other=np.vstack(fit_curves), + prefix="fitted", ) - analysis_results.extend(primary_results) - self.plotter.set_supplementary_data(primary_results=primary_results) - - # Draw fit curves and report - if self.options.plot: - for model in self._models: - sub_data = formatted_data.get_subset_of(model._name) - if sub_data.x.size == 0: - # If data is empty, skip drawing this model. - # This is the case when fit model exist but no data to fit is provided. - # For example, experiment may omit experimenting with some setting. - continue - x_interp = np.linspace(np.min(sub_data.x), np.max(sub_data.x), num=100) - - y_data_with_uncertainty = eval_with_uncertainties( - x=x_interp, - model=model, - params=fit_data.ufloat_params, - ) - y_interp = unp.nominal_values(y_data_with_uncertainty) - # Add fit line data - self.plotter.set_series_data( - model._name, - x_interp=x_interp, - y_interp=y_interp, - ) - if fit_data.covar is not None: - # Add confidence interval data - y_interp_err = unp.std_devs(y_data_with_uncertainty) - if np.isfinite(y_interp_err).all(): - self.plotter.set_series_data( - model._name, - y_interp_err=y_interp_err, - ) - - # Add raw data points + analysis_results.extend( + self._create_analysis_results( + fit_data=fit_data, + quality=quality, + **self.options.extra.copy(), + ) + ) + if self.options.return_data_points: + # Add raw data points analysis_results.extend( - self._create_curve_data(curve_data=formatted_data, models=self._models) + self._create_curve_data( + curve_data=curve_data.filter(like="formatted", axis="index"), + ) ) - # Finalize plot - if self.options.plot: - return analysis_results, [self.plotter.figure()] + if plot_bool: + if fit_data.success: + self.plotter.set_supplementary_data( + fit_red_chi=fit_data.reduced_chisq, + primary_results=[r for r in analysis_results if not r.name.startswith("@")], + ) + figures.extend(self._create_figures(curve_data=curve_data)) - return analysis_results, [] + return analysis_results, figures def __getstate__(self): state = self.__dict__.copy() diff --git a/qiskit_experiments/curve_analysis/curve_data.py b/qiskit_experiments/curve_analysis/curve_data.py index c344956895..62214e9d9b 100644 --- a/qiskit_experiments/curve_analysis/curve_data.py +++ b/qiskit_experiments/curve_analysis/curve_data.py @@ -112,6 +112,15 @@ class CurveData: data_allocation: np.ndarray labels: List[str] + @deprecate_func( + since="0.6", + additional_msg="CurveData is replaced with 'ScatterTable' with dataframe representation.", + removal_timeline="after 0.7", + package_name="qiskit-experiments", + ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + def get_subset_of(self, index: Union[str, int]) -> "CurveData": """Filter data by series name or index. 
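The ``CurveData`` deprecation above changes how downstream analyses slice their data. A minimal migration sketch (the ``curve_data``/``table`` objects and the ``model_a`` series name are hypothetical stand-ins) mapping the old accessors onto the ``ScatterTable`` dataframe idioms introduced in this patch:

.. code-block:: python

    # Deprecated CurveData access (warns as of 0.6):
    sub = curve_data.get_subset_of("model_a")
    x, y, y_err = sub.x, sub.y, sub.y_err

    # Equivalent ScatterTable access; rows are labeled by processing stage:
    formatted = table.filter(like="formatted", axis="index")
    sub = formatted[formatted.model_name == "model_a"]
    x, y, y_err = sub.xval.to_numpy(), sub.yval.to_numpy(), sub.yerr.to_numpy()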
diff --git a/qiskit_experiments/curve_analysis/scatter_table.py b/qiskit_experiments/curve_analysis/scatter_table.py
new file mode 100644
index 0000000000..7d16cedd44
--- /dev/null
+++ b/qiskit_experiments/curve_analysis/scatter_table.py
@@ -0,0 +1,198 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2023.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+"""Table representation of the x, y data for curve fitting."""
+
+import logging
+from typing import List, Sequence, Dict, Any, Union
+
+import numpy as np
+import pandas as pd
+
+from qiskit.utils import deprecate_func
+
+from qiskit_experiments.framework.table_mixin import DefaultColumnsMixIn
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ScatterTable(pd.DataFrame, DefaultColumnsMixIn):
+    """A table to store x and y data with metadata associated with each data point.
+
+    This class is implemented on top of the pandas dataframe.
+    See `pandas dataframe documentation `_
+    for the base class API documentation.
+
+    A single ``ScatterTable`` object can contain different kinds of intermediate data
+    generated through the curve fitting, which are categorized by the fit model.
+    When an experiment has sub-data for ``model_abc``, the formatted x, y, and y-error
+    array data may be obtained from the original table object as follows:
+
+    .. code-block:: python
+
+        formatted = table.filter(like="formatted", axis="index")
+        abc_data = formatted[formatted.model_name == "model_abc"]
+        x, y, e = abc_data.xval.to_numpy(), abc_data.yval.to_numpy(), abc_data.yerr.to_numpy()
+
+    """
+
+    # TODO Add this to toctree. In the current mechanism all pandas DataFrame members are rendered
+    #  and it fails in the Sphinx build process. We may need a custom directive to
+    #  exclude class members from an external package.
+
+    @classmethod
+    def _default_columns(cls) -> List[str]:
+        return [
+            "xval",
+            "yval",
+            "yerr",
+            "model_name",
+            "model_id",
+            "shots",
+        ]
+
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Use dataframe filtering method.",
+        pending=True,
+        package_name="qiskit-experiments",
+    )
+    def get_subset_of(self, index: Union[str, int]) -> "ScatterTable":
+        """Filter data by series name or index.
+
+        Args:
+            index: Series index or name.
+
+        Returns:
+            A subset of data corresponding to a particular series.
+        """
+        if isinstance(index, int):
+            index = self.labels[index]
+        return self[self.model_name == index]
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Call .xval.to_numpy() instead.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def x(self) -> np.ndarray:
+        """X values."""
+        return self.xval.to_numpy()
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Call .yval.to_numpy() instead.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def y(self) -> np.ndarray:
+        """Y values."""
+        return self.yval.to_numpy()
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Call .yerr.to_numpy() instead.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def y_err(self) -> np.ndarray:
+        """Standard deviation of y values."""
+        return self.yerr.to_numpy()
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Call .shots.to_numpy() instead.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def shots(self):
+        """Shot number of data points."""
+        # Column access avoids recursing into this property of the same name.
+        return self["shots"].to_numpy()
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Call .model_id.to_numpy() instead.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def data_allocation(self) -> np.ndarray:
+        """Index of corresponding fit model."""
+        # pylint: disable=no-member
+        return self.model_id.to_numpy()
+
+    @property
+    @deprecate_func(
+        since="0.6",
+        additional_msg="Curve data uses dataframe representation. Labels are a part of table.",
+        pending=True,
+        package_name="qiskit-experiments",
+        is_property=True,
+    )
+    def labels(self) -> List[str]:
+        """List of model names."""
+        # Order sensitive
+        name_id_tups = self.groupby(["model_name", "model_id"]).groups.keys()
+        return [k[0] for k in sorted(name_id_tups, key=lambda k: k[1])]
+
+    def append_list_values(
+        self,
+        other: Sequence,
+        prefix: str,
+    ) -> "ScatterTable":
+        """Add another list of dataframe values to this dataframe.
+
+        Args:
+            other: List of dataframe values to be added.
+            prefix: Prefix of row labels of the added values.
+
+        Returns:
+            New scatter table instance including both self and added data.
+        """
+        other_index = [f"{prefix}-{i:04d}" for i in range(len(other))]
+        return ScatterTable(
+            data=[*self.values, *other],
+            columns=self.columns,
+            index=[*self.index, *other_index],
+        )
+
+    def __json_encode__(self) -> Dict[str, Any]:
+        return {
+            "class": "ScatterTable",
+            "data": self.to_dict(orient="index"),
+        }
+
+    @classmethod
+    def __json_decode__(cls, value: Dict[str, Any]) -> "ScatterTable":
+        if not value.get("class", None) == "ScatterTable":
+            raise ValueError("JSON decoded value for ScatterTable is not valid class type.")
+
+        instance = cls.from_dict(
+            data=value.get("data", {}),
+            orient="index",
+        ).replace({np.nan: None})
+        return instance
+
+    @property
+    def _constructor(self):
+        # https://pandas.pydata.org/pandas-docs/stable/development/extending.html
+        return ScatterTable
diff --git a/qiskit_experiments/curve_analysis/standard_analysis/bloch_trajectory.py b/qiskit_experiments/curve_analysis/standard_analysis/bloch_trajectory.py
index 9e4885ba9b..a155eebe58 100644
--- a/qiskit_experiments/curve_analysis/standard_analysis/bloch_trajectory.py
+++ b/qiskit_experiments/curve_analysis/standard_analysis/bloch_trajectory.py
@@ -154,7 +154,7 @@ def _default_options(cls):
     def _generate_fit_guesses(
         self,
         user_opt: curve.FitOptions,
-        curve_data: curve.CurveData,
+        curve_data: curve.ScatterTable,
     ) -> Union[curve.FitOptions, List[curve.FitOptions]]:
         """Create algorithmic initial fit guess from analysis options and curve data.
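The same one-line ``CurveData`` to ``ScatterTable`` signature change repeats for each standard analysis below. For ``_generate_fit_guesses`` overrides that actually read the data, only the attribute access changes; a hedged sketch (the guess heuristic is invented for illustration, and ``np`` refers to the module's existing numpy import):

.. code-block:: python

    def _generate_fit_guesses(self, user_opt, curve_data):
        # ScatterTable is a dataframe: select formatted rows, then plain arrays.
        formatted = curve_data.filter(like="formatted", axis="index")
        y = formatted.yval.to_numpy()
        # set_if_empty comes from the existing FitOptions API.
        user_opt.p0.set_if_empty(
            amp=np.max(y) - np.min(y),  # illustrative heuristic only
            base=np.min(y),
        )
        return user_opt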
diff --git a/qiskit_experiments/curve_analysis/standard_analysis/decay.py b/qiskit_experiments/curve_analysis/standard_analysis/decay.py index 1ff1daaf58..4e6df069f1 100644 --- a/qiskit_experiments/curve_analysis/standard_analysis/decay.py +++ b/qiskit_experiments/curve_analysis/standard_analysis/decay.py @@ -65,7 +65,7 @@ def __init__( def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -98,13 +98,13 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - tau error is less than its value """ tau = fit_data.ufloat_params["tau"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(tau), ] diff --git a/qiskit_experiments/curve_analysis/standard_analysis/error_amplification_analysis.py b/qiskit_experiments/curve_analysis/standard_analysis/error_amplification_analysis.py index 116430f2d9..fa224bd6b8 100644 --- a/qiskit_experiments/curve_analysis/standard_analysis/error_amplification_analysis.py +++ b/qiskit_experiments/curve_analysis/standard_analysis/error_amplification_analysis.py @@ -118,7 +118,7 @@ def _default_options(cls): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -185,14 +185,14 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three, + - a reduced chi-squared lower than three and greater than zero, - a measured angle error that is smaller than the allowed maximum good angle error. This quantity is set in the analysis options. """ fit_d_theta = fit_data.ufloat_params["d_theta"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, abs(fit_d_theta.nominal_value) < abs(self.options.max_good_angle_error), ] diff --git a/qiskit_experiments/curve_analysis/standard_analysis/gaussian.py b/qiskit_experiments/curve_analysis/standard_analysis/gaussian.py index 2a17f54ac0..2d8cd973a8 100644 --- a/qiskit_experiments/curve_analysis/standard_analysis/gaussian.py +++ b/qiskit_experiments/curve_analysis/standard_analysis/gaussian.py @@ -88,7 +88,7 @@ def _default_options(cls) -> Options: def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -126,7 +126,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. 
A good fit has: - - a reduced chi-squared less than 3, + - a reduced chi-squared less than 3 and greater than zero, - a peak within the scanned frequency range, - a standard deviation that is not larger than the scanned frequency range, - a standard deviation that is wider than the smallest frequency increment, @@ -149,7 +149,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: fit_data.x_range[0] <= fit_freq.n <= fit_data.x_range[1], 1.5 * freq_increment < fit_sigma.n, fit_width_ratio < 0.25, - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(fit_sigma), snr > 2, ] diff --git a/qiskit_experiments/curve_analysis/standard_analysis/oscillation.py b/qiskit_experiments/curve_analysis/standard_analysis/oscillation.py index 461698c367..f173563c0e 100644 --- a/qiskit_experiments/curve_analysis/standard_analysis/oscillation.py +++ b/qiskit_experiments/curve_analysis/standard_analysis/oscillation.py @@ -72,7 +72,7 @@ def __init__( def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -111,7 +111,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three, + - a reduced chi-squared lower than three and greater than zero, - more than a quarter of a full period, - less than 10 full periods, and - an error on the fit frequency lower than the fit frequency. @@ -119,7 +119,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: fit_freq = fit_data.ufloat_params["freq"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, 1.0 / 4.0 < fit_freq.nominal_value < 10.0, curve.utils.is_error_not_significant(fit_freq), ] @@ -189,7 +189,7 @@ def __init__( def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -260,7 +260,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. 
A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - relative error of tau is less than its value - relative error of freq is less than its value """ @@ -268,7 +268,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: freq = fit_data.ufloat_params["freq"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(tau), curve.utils.is_error_not_significant(freq), ] diff --git a/qiskit_experiments/curve_analysis/standard_analysis/resonance.py b/qiskit_experiments/curve_analysis/standard_analysis/resonance.py index 558de514d8..375b4cc166 100644 --- a/qiskit_experiments/curve_analysis/standard_analysis/resonance.py +++ b/qiskit_experiments/curve_analysis/standard_analysis/resonance.py @@ -88,7 +88,7 @@ def _default_options(cls) -> Options: def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -126,7 +126,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared less than 3, + - a reduced chi-squared less than 3 and greater than zero, - a peak within the scanned frequency range, - a standard deviation that is not larger than the scanned frequency range, - a standard deviation that is wider than the smallest frequency increment, @@ -149,7 +149,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: fit_data.x_range[0] <= fit_freq.n <= fit_data.x_range[1], 1.5 * freq_increment < fit_kappa.n, fit_width_ratio < 0.25, - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(fit_kappa), snr > 2, ] diff --git a/qiskit_experiments/curve_analysis/utils.py b/qiskit_experiments/curve_analysis/utils.py index 84d4613584..66a8855254 100644 --- a/qiskit_experiments/curve_analysis/utils.py +++ b/qiskit_experiments/curve_analysis/utils.py @@ -18,6 +18,7 @@ import asteval import lmfit import numpy as np +from qiskit.utils.deprecation import deprecate_func from qiskit.utils import detach_prefix from uncertainties import UFloat, wrap as wrap_function from uncertainties import unumpy @@ -222,6 +223,104 @@ def eval_with_uncertainties( return wrapfunc(x=x, **sub_params) +def shot_weighted_average( + yvals: np.ndarray, + yerrs: np.ndarray, + shots: np.ndarray, +) -> Tuple[float, float, float]: + """Compute shot based variance and weighted average of the categorized data frame. + + Sample is weighted by the shot number. + + Args: + yvals: Y values to average. + yerrs: Y errors to average. + shots: Number of shots used to obtain Y value and error. + + Returns: + Averaged Y value, Y error, and total shots. 
+ """ + if len(yvals) == 1: + return yvals[0], yerrs[0], shots[0] + + if np.any(shots < -1): + # Shot number is unknown + return np.mean(yvals), np.nan, -1 + + total_shots = np.sum(shots) + weights = shots / total_shots + + avg_yval = np.sum(weights * yvals) + avg_yerr = np.sqrt(np.sum(weights**2 * yerrs**2)) + + return avg_yval, avg_yerr, total_shots + + +def inverse_weighted_variance( + yvals: np.ndarray, + yerrs: np.ndarray, + shots: np.ndarray, +) -> Tuple[float, float, int]: + """Compute inverse weighted variance and weighted average of the categorized data frame. + + Sample is weighted by the inverse of the data variance. + + Args: + yvals: Y values to average. + yerrs: Y errors to average. + shots: Number of shots used to obtain Y value and error. + + Returns: + Averaged Y value, Y error, and total shots. + """ + if len(yvals) == 1: + return yvals[0], yerrs[0], shots[0] + + total_shots = np.sum(shots) if all(shots > 0) else -1 + weights = 1 / yerrs**2 + yvar = 1 / np.sum(weights) + + avg_yval = yvar * np.sum(weights * yvals) + avg_yerr = np.sqrt(yvar) + + return avg_yval, avg_yerr, total_shots + + +# pylint: disable=unused-argument +def sample_average( + yvals: np.ndarray, + yerrs: np.ndarray, + shots: np.ndarray, +) -> Tuple[float, float, int]: + """Compute sample based variance and average of the categorized data frame. + + Original variance of the data is ignored and variance is computed with the y values. + + Args: + yvals: Y values to average. + yerrs: Y errors to average (ignored). + shots: Number of shots used to obtain Y value and error. + + Returns: + Averaged Y value, Y error, and total shots. + """ + if len(yvals) == 1: + return yvals[0], 0.0, shots[0] + + total_shots = np.sum(shots) if all(shots > 0) else -1 + + avg_yval = np.mean(yvals) + avg_yerr = np.sqrt(np.mean((avg_yval - yvals) ** 2) / len(yvals)) + + return avg_yval, avg_yerr, total_shots + + +@deprecate_func( + since="0.6", + additional_msg="The curve data representation is replaced with dataframe format.", + package_name="qiskit-experiments", + pending=True, +) def filter_data(data: List[Dict[str, any]], **filters) -> List[Dict[str, any]]: """Return the list of filtered data @@ -249,6 +348,12 @@ def filter_data(data: List[Dict[str, any]], **filters) -> List[Dict[str, any]]: return filtered_data +@deprecate_func( + since="0.6", + additional_msg="The curve data representation is replaced with dataframe format.", + package_name="qiskit-experiments", + pending=True, +) def mean_xy_data( xdata: np.ndarray, ydata: np.ndarray, @@ -369,6 +474,12 @@ def mean_xy_data( raise QiskitError(f"Unsupported method {method}") +@deprecate_func( + since="0.6", + additional_msg="The curve data representation is replaced with dataframe format.", + package_name="qiskit-experiments", + pending=True, +) def multi_mean_xy_data( series: np.ndarray, xdata: np.ndarray, @@ -427,6 +538,12 @@ def multi_mean_xy_data( ) +@deprecate_func( + since="0.6", + additional_msg="The curve data representation is replaced with dataframe format.", + package_name="qiskit-experiments", + pending=True, +) def data_sort( series: np.ndarray, xdata: np.ndarray, diff --git a/qiskit_experiments/data_processing/data_processor.py b/qiskit_experiments/data_processing/data_processor.py index 80a639b123..6be3761650 100644 --- a/qiskit_experiments/data_processing/data_processor.py +++ b/qiskit_experiments/data_processing/data_processor.py @@ -48,14 +48,14 @@ class DataProcessor: A DataProcessor defines a sequence of operations to perform on experimental data. 
Calling an instance of DataProcessor applies this sequence on the input argument. A DataProcessor is created with a list of DataAction instances. Each DataAction - applies its _process method on the data and returns the processed data. The nodes + applies its ``_process`` method on the data and returns the processed data. The nodes in the DataProcessor may also perform data validation and some minor formatting. The output of one data action serves as input for the next data action. - DataProcessor.__call__(datum) usually takes in an entry from the data property of + ``DataProcessor.__call__(datum)`` usually takes in an entry from the data property of an ExperimentData object (i.e. a dict containing metadata and memory keys and possibly counts, like the Result.data property) and produces the formatted data. - DataProcessor.__call__(datum) extracts the data from the given datum under - DataProcessor._input_key (which is specified at initialization) of the given datum. + ``DataProcessor.__call__(datum)`` extracts the data from the given datum under + ``DataProcessor._input_key`` (which is specified at initialization) of the given datum. """ def __init__( diff --git a/qiskit_experiments/database_service/utils.py b/qiskit_experiments/database_service/utils.py index 81740546ca..928d3ac43b 100644 --- a/qiskit_experiments/database_service/utils.py +++ b/qiskit_experiments/database_service/utils.py @@ -12,6 +12,7 @@ """Experiment utility functions.""" +import importlib.metadata import io import logging import threading @@ -19,33 +20,25 @@ from abc import ABC, abstractmethod from collections import OrderedDict from datetime import datetime, timezone -from typing import Callable, Tuple, List, Dict, Any, Union, Type, Optional +from typing import Callable, Tuple, Dict, Any, Union, Type, Optional import json -import pandas as pd import dateutil.parser -import pkg_resources from dateutil import tz -from qiskit.version import __version__ as terra_version - from qiskit_ibm_experiment import ( IBMExperimentEntryExists, IBMExperimentEntryNotFound, ) from .exceptions import ExperimentEntryNotFound, ExperimentEntryExists, ExperimentDataError -from ..version import __version__ as experiments_version LOG = logging.getLogger(__name__) def qiskit_version(): """Return the Qiskit version.""" - try: - return pkg_resources.get_distribution("qiskit").version - except Exception: # pylint: disable=broad-except - return {"qiskit-terra": terra_version, "qiskit-experiments": experiments_version} + return {p: importlib.metadata.distribution(p).version for p in ("qiskit", "qiskit-experiments")} def parse_timestamp(utc_dt: Union[datetime, str]) -> datetime: @@ -278,199 +271,3 @@ def append(self, value): """Append to the list.""" with self._lock: self._container.append(value) - - -class ThreadSafeDataFrame(ThreadSafeContainer): - """Thread safe data frame. - - This class wraps pandas dataframe with predefined column labels, - which is specified by the class method `_default_columns`. - Subclass can override this method to provide default labels specific to its data structure. - - This object is expected to be used internally in the ExperimentData. 
- """ - - def __init__(self, init_values=None): - """ThreadSafeContainer constructor.""" - self._columns = self._default_columns() - self._extra = [] - super().__init__(init_values) - - @classmethod - def _default_columns(cls) -> List[str]: - return [] - - def _init_container(self, init_values: Optional[Union[Dict, pd.DataFrame]] = None): - """Initialize the container.""" - if init_values is None: - return pd.DataFrame(columns=self.get_columns()) - if isinstance(init_values, pd.DataFrame): - input_columns = list(init_values.columns) - if input_columns != self.get_columns(): - raise ValueError( - f"Input data frame contains unexpected columns {input_columns}. " - f"{self.__class__.__name__} defines {self.get_columns()} as default columns." - ) - return init_values - if isinstance(init_values, dict): - return pd.DataFrame.from_dict( - data=init_values, - orient="index", - columns=self.get_columns(), - ) - raise TypeError(f"Initial value of {type(init_values)} is not valid data type.") - - def get_columns(self) -> List[str]: - """Return current column names. - - Returns: - List of column names. - """ - with self._lock: - return self._columns.copy() - - def add_columns(self, *new_columns: str, default_value: Any = None): - """Add new columns to the table. - - This operation mutates the current container. - - Args: - new_columns: Name of columns to add. - default_value: Default value to fill added columns. - """ - with self._lock: - # Order sensitive - new_columns = [c for c in new_columns if c not in self.get_columns()] - if len(new_columns) == 0: - return - - # Update columns - for new_column in new_columns: - self._container.insert(len(self._container.columns), new_column, default_value) - self._columns.extend(new_columns) - self._extra.extend(new_columns) - - def clear(self): - """Remove all elements from this container.""" - with self._lock: - self._container = self._init_container() - self._columns = self._default_columns() - self._extra = [] - - def container( - self, - collapse_extra: bool = True, - ) -> pd.DataFrame: - """Return bare pandas dataframe. - - Args: - collapse_extra: Set True to show only default columns. - - Returns: - Bare pandas dataframe. This object is no longer thread safe. - """ - with self._lock: - container = self._container.copy() - - if collapse_extra: - return container[self._default_columns()] - return container - - def drop_entry( - self, - index: str, - ): - """Drop entry from the dataframe. - - Args: - index: Name of entry to drop. - - Raises: - ValueError: When index is not in this table. - """ - with self._lock: - if index not in self._container.index: - raise ValueError(f"Table index {index} doesn't exist in this table.") - self._container.drop(index, inplace=True) - - def get_entry( - self, - index: str, - ) -> pd.Series: - """Get entry from the dataframe. - - Args: - index: Name of entry to acquire. - - Returns: - Pandas Series of acquired entry. This doesn't mutate the table. - - Raises: - ValueError: When index is not in this table. - """ - with self._lock: - if index not in self._container.index: - raise ValueError(f"Table index {index} doesn't exist in this table.") - - return self._container.loc[index] - - def add_entry( - self, - index: str, - **kwargs, - ) -> pd.Series: - """Add new entry to the dataframe. - - Args: - index: Name of this entry. Must be unique in this table. - kwargs: Description of new entry to register. - - Returns: - Pandas Series of added entry. This doesn't mutate the table. 
- - Raises: - ValueError: When index is not unique in this table. - """ - with self._lock: - if index in self._container.index: - raise ValueError(f"Table index {index} already exists in the table.") - - if kwargs.keys() - set(self.get_columns()): - self.add_columns(*kwargs.keys()) - - template = dict.fromkeys(self.get_columns()) - template.update(kwargs) - - if not isinstance(index, str): - index = str(index) - self._container.loc[index] = list(template.values()) - - return self._container.iloc[-1] - - def _repr_html_(self) -> Union[str, None]: - """Return HTML representation of this dataframe.""" - with self._lock: - # Remove underscored columns. - return self._container._repr_html_() - - def __json_encode__(self) -> Dict[str, Any]: - with self._lock: - return { - "class": "ThreadSafeDataFrame", - "data": self._container.to_dict(orient="index"), - "columns": self._columns, - "extra": self._extra, - } - - @classmethod - def __json_decode__(cls, value: Dict[str, Any]) -> "ThreadSafeDataFrame": - if not value.get("class", None) == "ThreadSafeDataFrame": - raise ValueError("JSON decoded value for ThreadSafeDataFrame is not valid class type.") - - instance = object.__new__(cls) - # Need to update self._columns first to set extra columns in the dataframe container. - instance._columns = value.get("columns", cls._default_columns()) - instance._extra = value.get("extra", []) - instance._lock = threading.RLock() - instance._container = instance._init_container(init_values=value.get("data", {})) - return instance diff --git a/qiskit_experiments/framework/__init__.py b/qiskit_experiments/framework/__init__.py index c9a480e09e..c6d9ccbae8 100644 --- a/qiskit_experiments/framework/__init__.py +++ b/qiskit_experiments/framework/__init__.py @@ -101,6 +101,7 @@ .. autosummary:: :toctree: ../stubs/ + CompositeExperiment ParallelExperiment BatchExperiment CompositeAnalysis @@ -143,6 +144,7 @@ from .composite import ( ParallelExperiment, BatchExperiment, + CompositeExperiment, CompositeAnalysis, ) from .json import ExperimentEncoder, ExperimentDecoder diff --git a/qiskit_experiments/framework/analysis_result_table.py b/qiskit_experiments/framework/analysis_result_table.py index 053655a2a7..16b8716874 100644 --- a/qiskit_experiments/framework/analysis_result_table.py +++ b/qiskit_experiments/framework/analysis_result_table.py @@ -13,33 +13,24 @@ """Table representation of analysis results.""" import logging +import threading import re import uuid import warnings -from typing import List, Union, Optional +from typing import List, Dict, Union, Optional, Any +import numpy as np import pandas as pd -from qiskit_experiments.database_service.utils import ThreadSafeDataFrame - -LOG = logging.getLogger(__name__) +from qiskit_experiments.database_service.utils import ThreadSafeContainer +from .table_mixin import DefaultColumnsMixIn -class AnalysisResultTable(ThreadSafeDataFrame): - """Table form container of analysis results. - - This table is a dataframe wrapper with the thread-safe mechanism with predefined columns. - This object is attached to the :class:`.ExperimentData` container to store - analysis results. Each table row contains series of metadata in addition to the - result value itself. +LOG = logging.getLogger(__name__) - User can rely on the dataframe filtering mechanism to analyze large scale experiment - results, e.g. massive parallel experiment and batch experiment outcomes, efficiently. - See `pandas dataframe documentation `_ - for more details. 
- """ - VALID_ID_REGEX = re.compile(r"\A(?P\w{8})-\w{4}-\w{4}-\w{4}-\w{12}\Z") +class AnalysisResultContainer(pd.DataFrame, DefaultColumnsMixIn): + """Data container of the thread-safe analysis result table.""" @classmethod def _default_columns(cls) -> List[str]: @@ -57,6 +48,33 @@ def _default_columns(cls) -> List[str]: "created_time", ] + @property + def _constructor(self): + # https://pandas.pydata.org/pandas-docs/stable/development/extending.html + return AnalysisResultContainer + + +class AnalysisResultTable(ThreadSafeContainer): + """A thread-safe table form container of analysis results. + + This table is a dataframe wrapper with the thread-safe mechanism with predefined columns. + This object is attached to the :class:`.ExperimentData` container to store + analysis results. Each table row contains series of metadata in addition to the + result value itself. + + User can rely on the dataframe filtering mechanism to analyze large scale experiment + results, e.g. massive parallel experiment and batch experiment outcomes, efficiently. + See `pandas dataframe documentation `_ + for more details. + """ + + VALID_ID_REGEX = re.compile(r"\A(?P\w{8})-\w{4}-\w{4}-\w{4}-\w{12}\Z") + + def _init_container(self, init_values: Any): + if init_values is None: + return AnalysisResultContainer() + return init_values + def result_ids(self) -> List[str]: """Return all result IDs in this table.""" with self._lock: @@ -67,20 +85,21 @@ def filter_columns(self, columns: Union[str, List[str]]) -> List[str]: Args: columns: Specifying a set of columns to return. You can pass a list of each - column name to return, otherwise builtin column groups are available. + column name to return, otherwise builtin column groups are available: + + * ``all``: Return all columns, including metadata to communicate + with experiment service, such as entry IDs. + * ``default``: Return columns including analysis result with supplementary + information about experiment. + * ``minimal``: Return only analysis subroutine returns. - * "all": Return all columns, including metadata to communicate - with experiment service, such as entry IDs. - * "default": Return columns including analysis result with supplementary - information about experiment. - * "minimal": Return only analysis subroutine returns. Raises: ValueError: When column is given in string which doesn't match with any builtin group. """ with self._lock: if columns == "all": - return self._columns + return self._container.columns if columns == "default": return [ "name", @@ -90,18 +109,18 @@ def filter_columns(self, columns: Union[str, List[str]]) -> List[str]: "quality", "backend", "run_time", - ] + self._extra + ] + self._container.extra_columns() if columns == "minimal": return [ "name", "components", "value", "quality", - ] + self._extra + ] + self._container.extra_columns() if not isinstance(columns, str): out = [] for column in columns: - if column in self._columns: + if column in self._container.columns: out.append(column) else: warnings.warn( @@ -113,6 +132,27 @@ def filter_columns(self, columns: Union[str, List[str]]) -> List[str]: f"Column group {columns} is not valid name. Use either 'all', 'default', 'minimal'." ) + def get_entry( + self, + index: str, + ) -> pd.Series: + """Get entry from the dataframe. + + Args: + index: Name of entry to acquire. + + Returns: + Pandas Series of acquired entry. This doesn't mutate the table. + + Raises: + ValueError: When index is not in this table. 
+ """ + with self._lock: + if index not in self._container.index: + raise ValueError(f"Table index {index} doesn't exist in this table.") + + return self._container.loc[index] + # pylint: disable=arguments-renamed def add_entry( self, @@ -156,11 +196,35 @@ def add_entry( "experiment data. Please use another ID to avoid index collision." ) - return super().add_entry( - index=short_id, - result_id=result_id, - **kwargs, - ) + return self._container.add_entry( + index=short_id, + result_id=result_id, + **kwargs, + ) + + def drop_entry( + self, + index: str, + ): + """Drop specified labels from rows. + + This directly calls :meth:`.drop` of the DataFrame container object. + + Args: + index: Name of entry to drop. + + Raises: + ValueError: When index is not in this table. + """ + with self._lock: + if index not in self._container.index: + raise ValueError(f"Table index {index} doesn't exist in this table.") + self._container.drop(index, inplace=True) + + def clear(self): + """Remove all elements from this container.""" + with self._lock: + self._container = AnalysisResultContainer() def _unique_table_index(self): """Generate unique UUID which is unique in the table with first 8 characters.""" @@ -174,3 +238,28 @@ def _unique_table_index(self): "Unique result_id string cannot be prepared for this table within 1000 trials. " "Reduce number of entries, or manually provide a unique result_id." ) + + def _repr_html_(self) -> Union[str, None]: + """Return HTML representation of this dataframe.""" + with self._lock: + return self._container._repr_html_() + + def __json_encode__(self) -> Dict[str, Any]: + with self._lock: + return { + "class": "AnalysisResultTable", + "data": self._container.to_dict(orient="index"), + } + + @classmethod + def __json_decode__(cls, value: Dict[str, Any]) -> "AnalysisResultTable": + if not value.get("class", None) == "AnalysisResultTable": + raise ValueError("JSON decoded value for AnalysisResultTable is not valid class type.") + + instance = object.__new__(cls) + instance._lock = threading.RLock() + instance._container = AnalysisResultContainer.from_dict( + data=value.get("data", {}), + orient="index", + ).replace({np.nan: None}) + return instance diff --git a/qiskit_experiments/framework/backend_data.py b/qiskit_experiments/framework/backend_data.py index ee2e9567d1..93ed671d6d 100644 --- a/qiskit_experiments/framework/backend_data.py +++ b/qiskit_experiments/framework/backend_data.py @@ -32,10 +32,10 @@ def __init__(self, backend): self._parse_additional_data() def _parse_additional_data(self): - # data specific parsing not done yet in qiskit-terra + # data specific parsing not done yet in upstream qiskit if hasattr(self._backend, "_conf_dict") and self._backend._conf_dict["open_pulse"]: if "u_channel_lo" not in self._backend._conf_dict: - self._backend._conf_dict["u_channel_lo"] = [] # to avoid terra bug + self._backend._conf_dict["u_channel_lo"] = [] # to avoid qiskit bug self._pulse_conf = PulseBackendConfiguration.from_dict(self._backend._conf_dict) @property @@ -146,10 +146,7 @@ def acquire_alignment(self): if self._v1: return self._backend.configuration().timing_constraints.get("acquire_alignment", 1) elif self._v2: - # currently has a typo in terra - if hasattr(self._backend.target, "acquire_alignment"): - return self._backend.target.acquire_alignment - return self._backend.target.aquire_alignment + return self._backend.target.acquire_alignment except AttributeError: return 1 return 1 @@ -225,7 +222,7 @@ def meas_freqs(self): .. 
note:: - The qiskit-terra base classes do not provide this information as a + The qiskit base classes do not provide this information as a standard backend property, but it is available from some providers in the data returned by the ``Backend.defaults()`` method. """ @@ -252,7 +249,7 @@ def is_simulator(self): For `BackendV2` we sometimes cannot be sure, because it lacks a `simulator` field, as was present in `BackendV1`'s configuration. We still check whether the backend inherits `FakeBackendV2`, for - either of its existing implementations in Terra. + either of its existing implementations in Qiskit. """ if self._v1: if self._backend.configuration().simulator or isinstance(self._backend, FakeBackend): diff --git a/qiskit_experiments/framework/base_analysis.py b/qiskit_experiments/framework/base_analysis.py index 7122b6f548..acced5544d 100644 --- a/qiskit_experiments/framework/base_analysis.py +++ b/qiskit_experiments/framework/base_analysis.py @@ -17,6 +17,7 @@ from collections import OrderedDict from datetime import datetime from typing import List, Tuple, Union, Dict +import warnings from dateutil import tz @@ -166,6 +167,9 @@ def run_analysis(expdata: ExperimentData): # Clearing previous analysis data experiment_data._clear_results() + if not expdata.data(): + warnings.warn("ExperimentData object data is empty.\n") + # Making new analysis results, figures = analysis._run_analysis(expdata) diff --git a/qiskit_experiments/framework/base_experiment.py b/qiskit_experiments/framework/base_experiment.py index bb14cdc610..41240df41c 100644 --- a/qiskit_experiments/framework/base_experiment.py +++ b/qiskit_experiments/framework/base_experiment.py @@ -53,7 +53,7 @@ def __init__( QiskitError: If qubits contains duplicates. """ # Experiment identification metadata - self._type = experiment_type if experiment_type else type(self).__name__ + self.experiment_type = experiment_type # Circuit parameters self._num_qubits = len(physical_qubits) @@ -90,6 +90,14 @@ def experiment_type(self) -> str: """Return experiment type.""" return self._type + @experiment_type.setter + def experiment_type(self, exp_type: str) -> None: + """Set the type for the experiment.""" + if exp_type is None: + self._type = type(self).__name__ + else: + self._type = exp_type + @property def physical_qubits(self) -> Tuple[int, ...]: """Return the device qubits for the experiment.""" @@ -265,20 +273,72 @@ def _finalize(self): """ pass - def _run_jobs(self, circuits: List[QuantumCircuit], **run_options) -> List[Job]: - """Run circuits on backend as 1 or more jobs.""" + def _max_circuits(self, backend: Backend = None): + """ + Calculate the maximum number of circuits per job for the experiment. 
+ """ + + # set backend + if backend is None: + if self.backend is None: + raise QiskitError("A backend must be provided.") + backend = self.backend # Get max circuits for job splitting max_circuits_option = getattr(self.experiment_options, "max_circuits", None) - max_circuits_backend = self._backend_data.max_circuits + max_circuits_backend = BackendData(backend).max_circuits + if max_circuits_option and max_circuits_backend: - max_circuits = min(max_circuits_option, max_circuits_backend) + return min(max_circuits_option, max_circuits_backend) elif max_circuits_option: - max_circuits = max_circuits_option + return max_circuits_option else: - max_circuits = max_circuits_backend + return max_circuits_backend + + def job_info(self, backend: Backend = None): + """ + Get information about job distribution for the experiment on a specific + backend. + + Args: + backend: Optional, the backend for which to get job distribution + information. If not specified, the experiment must already have a + set backend. + + Returns: + dict: A dictionary containing information about job distribution. + + - "Total number of circuits in the experiment": Total number of + circuits in the experiment. + + - "Maximum number of circuits per job": Maximum number of + circuits in one job based on backend and experiment settings. + + - "Total number of jobs": Number of jobs needed to run this + experiment on the currently set backend. + + Raises: + QiskitError: if backend is not specified. + + """ + max_circuits = self._max_circuits(backend) + total_circuits = len(self.circuits()) + + if max_circuits is None: + num_jobs = 1 + else: + num_jobs = (total_circuits + max_circuits - 1) // max_circuits + return { + "Total number of circuits in the experiment": total_circuits, + "Maximum number of circuits per job": max_circuits, + "Total number of jobs": num_jobs, + } + + def _run_jobs(self, circuits: List[QuantumCircuit], **run_options) -> List[Job]: + """Run circuits on backend as 1 or more jobs.""" + max_circuits = self._max_circuits(self.backend) # Run experiment jobs - if max_circuits and len(circuits) > max_circuits: + if max_circuits and (len(circuits) > max_circuits): # Split jobs for backends that have a maximum job size job_circuits = [ circuits[i : i + max_circuits] for i in range(0, len(circuits), max_circuits) diff --git a/qiskit_experiments/framework/composite/__init__.py b/qiskit_experiments/framework/composite/__init__.py index d308f3f38c..de0df5604a 100644 --- a/qiskit_experiments/framework/composite/__init__.py +++ b/qiskit_experiments/framework/composite/__init__.py @@ -13,6 +13,7 @@ """Composite Experiments""" # Base classes +from .composite_experiment import CompositeExperiment from .composite_analysis import CompositeAnalysis # Composite experiment classes diff --git a/qiskit_experiments/framework/composite/batch_experiment.py b/qiskit_experiments/framework/composite/batch_experiment.py index c7f66cdca3..7ef50ac2a0 100644 --- a/qiskit_experiments/framework/composite/batch_experiment.py +++ b/qiskit_experiments/framework/composite/batch_experiment.py @@ -49,6 +49,7 @@ def __init__( backend: Optional[Backend] = None, flatten_results: bool = None, analysis: Optional[CompositeAnalysis] = None, + experiment_type: Optional[str] = None, ): """Initialize a batch experiment. 
@@ -86,7 +87,12 @@ def __init__( logical_qubit += 1 qubits = tuple(self._qubit_map.keys()) super().__init__( - experiments, qubits, backend=backend, analysis=analysis, flatten_results=flatten_results + experiments, + qubits, + backend=backend, + analysis=analysis, + flatten_results=flatten_results, + experiment_type=experiment_type, ) def circuits(self): diff --git a/qiskit_experiments/framework/composite/composite_analysis.py b/qiskit_experiments/framework/composite/composite_analysis.py index 85e8baf0a0..66f6b1642a 100644 --- a/qiskit_experiments/framework/composite/composite_analysis.py +++ b/qiskit_experiments/framework/composite/composite_analysis.py @@ -52,7 +52,12 @@ class CompositeAnalysis(BaseAnalysis): experiment data. """ - def __init__(self, analyses: List[BaseAnalysis], flatten_results: bool = None): + def __init__( + self, + analyses: List[BaseAnalysis], + flatten_results: bool = None, + generate_figures: Optional[str] = "always", + ): """Initialize a composite analysis class. Args: @@ -62,6 +67,9 @@ def __init__(self, analyses: List[BaseAnalysis], flatten_results: bool = None): nested composite experiments. If False save each component experiment results as a separate child ExperimentData container. + generate_figures: Optional flag to set the figure generation behavior. + If ``always``, figures are always generated. If ``never``, figures are never generated. + If ``selective``, figures are generated if the analysis ``quality`` is ``bad``. """ if flatten_results is None: # Backward compatibility for 0.6 @@ -79,6 +87,8 @@ def __init__(self, analyses: List[BaseAnalysis], flatten_results: bool = None): if flatten_results: self._set_flatten_results() + self._set_generate_figures(generate_figures) + def component_analysis( self, index: Optional[int] = None ) -> Union[BaseAnalysis, List[BaseAnalysis]]: @@ -96,6 +106,14 @@ def component_analysis( return self._analyses return self._analyses[index] + def set_options(self, **fields): + """Set the analysis options for the experiment. 
If the ``broadcast`` argument is set to ``True``, the + analysis options will cascade to the child experiments.""" + super().set_options(**fields) + if fields.get("broadcast", None): + for sub_analysis in self._analyses: + sub_analysis.set_options(**fields) + def copy(self): ret = super().copy() # Recursively copy analysis @@ -113,7 +131,7 @@ def run( experiment_data = experiment_data.copy() if not self._flatten_results: - # Initialize child components if they are not initalized + # Initialize child components if they are not initialized # This only needs to be done if results are not being flattened self._add_child_data(experiment_data) @@ -342,6 +360,15 @@ def _set_flatten_results(self): if isinstance(analysis, CompositeAnalysis): analysis._set_flatten_results() + def _set_generate_figures(self, generate_figures): + """Recursively propagate ``generate_figures`` to all child experiments.""" + self._generate_figures = generate_figures + for analysis in self._analyses: + if isinstance(analysis, CompositeAnalysis): + analysis._set_generate_figures(generate_figures) + else: + analysis._generate_figures = generate_figures + def _combine_results( self, component_experiment_data: List[ExperimentData], diff --git a/qiskit_experiments/framework/composite/parallel_experiment.py b/qiskit_experiments/framework/composite/parallel_experiment.py index 91031051ff..24f901a292 100644 --- a/qiskit_experiments/framework/composite/parallel_experiment.py +++ b/qiskit_experiments/framework/composite/parallel_experiment.py @@ -49,6 +49,7 @@ def __init__( backend: Optional[Backend] = None, flatten_results: bool = None, analysis: Optional[CompositeAnalysis] = None, + experiment_type: Optional[str] = None, ): """Initialize the analysis object. @@ -79,7 +80,12 @@ def __init__( for exp in experiments: qubits += exp.physical_qubits super().__init__( - experiments, qubits, backend=backend, analysis=analysis, flatten_results=flatten_results + experiments, + qubits, + backend=backend, + analysis=analysis, + flatten_results=flatten_results, + experiment_type=experiment_type, ) def circuits(self): diff --git a/qiskit_experiments/framework/experiment_data.py b/qiskit_experiments/framework/experiment_data.py index 95ac0c5ac4..c42f610ad4 100644 --- a/qiskit_experiments/framework/experiment_data.py +++ b/qiskit_experiments/framework/experiment_data.py @@ -196,7 +196,7 @@ def _repr_svg_(self): return None -_FigureT = Union[str, bytes, MatplotlibFigure, FigureData] +FigureT = Union[str, bytes, MatplotlibFigure, FigureData] class ExperimentData: @@ -205,7 +205,7 @@ class ExperimentData: .. note:: Saving experiment data to the cloud database is currently a limited access feature. You can check whether you have access by logging into the IBM Quantum interface - and seeing if you can see the `database <https://quantum-computing.ibm.com/experiments>`__. + and seeing if you can see the `database <https://quantum.ibm.com/experiments>`__. This class handles the following: @@ -1071,6 +1071,13 @@ def _retrieve_data(self): """Retrieve job data if missing experiment data.""" # Get job results if missing in experiment data. if self.provider is None: + # 'self._result_data' could be locked, so we check a copy of it. + if not self._result_data.copy(): + # Add a warning so the user has an indication of why the analysis may fail.
+ LOG.warning( + "Provider for ExperimentData object doesn't exist, resulting in a failed attempt to" + " retrieve data from the server; no stored result data exists" + ) return retrieved_jobs = {} jobs_to_retrieve = [] # the list of all jobs to retrieve from the server @@ -1134,7 +1141,7 @@ def data( @do_auto_save def add_figures( self, - figures: Union[_FigureT, List[_FigureT]], + figures: Union[FigureT, List[FigureT]], figure_names: Optional[Union[str, List[str]]] = None, overwrite: bool = False, save_figure: Optional[bool] = None, @@ -1311,6 +1318,12 @@ def figure( raise ExperimentEntryNotFound(f"Figure {figure_key} not found.") figure_key = self._figures.keys()[figure_key] + # All figures must have '.svg' in their names when added, as the extension is added to the key + # name in the `add_figures()` method of this class. + if isinstance(figure_key, str): + if not figure_key.endswith(".svg"): + figure_key += ".svg" + figure_data = self._figures.get(figure_key, None) if figure_data is None and self.service: figure = self.service.figure(experiment_id=self.experiment_id, figure_name=figure_key) @@ -1405,7 +1418,7 @@ def add_analysis_results( tags = tags or [] backend = backend or self.backend_name - series = self._analysis_results.add_entry( + self._analysis_results.add_entry( result_id=result_id, name=name, value=value, @@ -1420,8 +1433,9 @@ def add_analysis_results( **extra_values, ) if self.auto_save: + last_index = self._analysis_results.result_ids()[-1][:8] service_result = _series_to_service_result( - series=series, + series=self._analysis_results.get_entry(last_index), service=self._service, auto_save=False, ) @@ -1521,25 +1535,25 @@ def analysis_results( index: Index of the analysis result to be returned. Several types are accepted for convenience: - * None: Return all analysis results. - * int: Specific index of the analysis results. - * slice: A list slice of indexes. - * str: ID or name of the analysis result. + * None: Return all analysis results. + * int: Specific index of the analysis results. + * slice: A list slice of indexes. + * str: ID or name of the analysis result. refresh: Retrieve the latest analysis results from the server, if an experiment service is available. - block: If True block for any analysis callbacks to finish running. + block: If ``True``, block for any analysis callbacks to finish running. timeout: max time in seconds to wait for analysis callbacks to finish running. columns: Specifying a set of columns to return. You can pass a list of each - column name to return, otherwise builtin column groups are available. + column name to return, otherwise builtin column groups are available: - * "all": Return all columns, including metadata to communicate - with experiment service, such as entry IDs. - * "default": Return columns including analysis result with supplementary - information about experiment. - * "minimal": Return only analysis subroutine returns. + * ``all``: Return all columns, including metadata to communicate + with the experiment service, such as entry IDs. + * ``default``: Return columns including analysis results with supplementary + information about the experiment. + * ``minimal``: Return only analysis subroutine returns. - dataframe: Set True to return analysis results in the dataframe format. + dataframe: Set to ``True`` to return analysis results in the dataframe format. Returns: Analysis results for this experiment.
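
As a rough illustration of the dataframe-based results API documented above, the following sketch assumes the keyword form of ``add_analysis_results`` shown in the surrounding hunks; the result name and value are made up:

from qiskit_experiments.framework import ExperimentData

expdata = ExperimentData()
expdata.add_analysis_results(name="T1", value=8.0e-5, quality="good")

# Builtin column groups described in the docstring above.
df_default = expdata.analysis_results(dataframe=True, columns="default")
df_minimal = expdata.analysis_results(dataframe=True, columns="minimal")
print(df_default)
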
@@ -1553,7 +1567,7 @@ def analysis_results( ) self._retrieve_analysis_results(refresh=refresh) - out = self._analysis_results.container(collapse_extra=False) + out = self._analysis_results.copy() if index is not None: out = _filter_analysis_results(index, out) @@ -1674,7 +1688,7 @@ def save( Args: suppress_errors: should the method catch exceptions (true) or - pass them on, potentially aborting the experiment (false) + pass them on, potentially aborting the experiment (false) max_workers: Maximum number of concurrent worker threads (capped by 10) save_figures: Whether to save figures in the database or not save_children: For composite experiments, whether to save children as well @@ -1715,7 +1729,7 @@ def save( return analysis_results_to_create = [] - for _, series in self._analysis_results.container(collapse_extra=False).iterrows(): + for _, series in self._analysis_results.copy().iterrows(): # TODO We should support saving entire dataframe # Calling API per entry takes huge amount of time. legacy_result = _series_to_service_result( @@ -1772,7 +1786,7 @@ def save( if not self.service.local and self.verbose: print( "You can view the experiment online at " - f"https://quantum-computing.ibm.com/experiments/{self.experiment_id}" + f"https://quantum.ibm.com/experiments/{self.experiment_id}" ) # handle children, but without additional prints if save_children: @@ -2286,6 +2300,7 @@ def copy(self, copy_results: bool = True) -> "ExperimentData": new_instance = ExperimentData( backend=self.backend, service=self.service, + provider=self.provider, parent_id=self.parent_id, job_ids=self.job_ids, child_data=list(self._child_data.values()), @@ -2516,7 +2531,7 @@ def get_service_from_backend(backend): @staticmethod def get_service_from_provider(provider): """Initializes the service from the provider data""" - db_url = "https://auth.quantum-computing.ibm.com/api" + db_url = "https://auth.quantum.ibm.com/api" try: # qiskit-ibmq-provider style if hasattr(provider, "credentials"): diff --git a/qiskit_experiments/framework/table_mixin.py b/qiskit_experiments/framework/table_mixin.py new file mode 100644 index 0000000000..fc59745199 --- /dev/null +++ b/qiskit_experiments/framework/table_mixin.py @@ -0,0 +1,109 @@ +# This code is part of Qiskit. +# +# (C) Copyright IBM 2023. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +# pylint: disable=no-member +
+"""MixIn class for pandas dataframe.""" +from typing import List, Callable +from functools import wraps + +import pandas as pd + + +class DefaultColumnsMixIn: + """A mixin that sets default data columns on a dataframe subclass. + + Subclasses must define the _default_columns class method to provide default column names. + This name list is order sensitive and the first element will show up as the + leftmost column of the dataframe table. + + .. note:: + + This Mix-in class is designed for use with the pandas DataFrame. + The implementation of this class may change in the future without notice + if we switch to another data container. + + """ + + _default_columns: Callable + + def __init_subclass__(cls, **kwargs): + # To make sure the mixin constructor is called.
+ super().__init_subclass__(**kwargs) + + @wraps(cls.__init__, assigned=("__annotations__",)) + def _call_init_and_set_columns(self, *init_args, **init_kwargs): + super(cls, self).__init__(*init_args, **init_kwargs) + if len(self.columns) == 0: + self.add_columns(*cls._default_columns()) + + # Monkey patch the mixed-in class constructor to make sure default columns are added + cls.__init__ = _call_init_and_set_columns + + def add_columns( + self: pd.DataFrame, + *new_columns: str, + ): + """Add new columns to the table. + + This operation mutates the current container. + + Args: + new_columns: Names of the columns to add. + """ + # Order sensitive + new_columns = [c for c in new_columns if c not in self.columns] + if len(new_columns) == 0: + return + + # Update columns + for new_column in new_columns: + loc = len(self.columns) + self.insert(loc, new_column, value=None) + + def add_entry( + self: pd.DataFrame, + index: str, + **kwargs, + ): + """Add new entry to the dataframe. + + This operation mutates the current container. + + Args: + index: Name of this entry. Must be unique in this table. + kwargs: Description of new entry to register. + """ + if not isinstance(index, str): + index = str(index) + if kwargs.keys() - set(self.columns): + self.add_columns(*kwargs.keys()) + + # A hack to avoid unwanted dtype update. Appending new row with .loc indexer + # performs enlargement and implicitly changes dtype. This often induces a confusion of + # NaN (numeric container) and None (object container) for missing values. + # Filling a row with None values before assigning actual values can keep column dtype, + # but this behavior might change in a future pandas version. + # https://github.com/pandas-dev/pandas/issues/6485 + # Also see test.framework.test_data_table.TestBaseTable.test_type_* + self.loc[index] = [None] * len(self.columns) + + template = dict.fromkeys(self.columns, None) + template.update(kwargs) + self.loc[index] = pd.array(list(template.values()), dtype=object) + + def extra_columns( + self: pd.DataFrame, + ) -> List[str]: + """Return a list of columns added by a user.""" + return [c for c in self.columns if c not in self._default_columns()] diff --git a/qiskit_experiments/library/calibration/fine_drag_cal.py b/qiskit_experiments/library/calibration/fine_drag_cal.py index 7c52e2d6bd..c79bad7562 100644 --- a/qiskit_experiments/library/calibration/fine_drag_cal.py +++ b/qiskit_experiments/library/calibration/fine_drag_cal.py @@ -30,7 +30,7 @@ class FineDragCal(BaseCalibrationExperiment, FineDrag): - """A calibration version of the fine drag experiment.""" + """A calibration version of the fine DRAG experiment.""" def __init__( self, @@ -41,7 +41,7 @@ def __init__( cal_parameter_name: Optional[str] = "β", auto_update: bool = True, ): - r"""See class :class:`FineDrag` for details. + r"""See class :class:`.FineDrag` for details. Note that this class implicitly assumes that the target angle of the gate is :math:`\pi` as seen from the default experiment options. @@ -148,7 +148,7 @@ def update_calibrations(self, experiment_data: ExperimentData): class FineXDragCal(FineDragCal): - """Fine drag calibration of X gate.""" + """Fine DRAG calibration of the X gate.""" def __init__( self, @@ -158,7 +158,7 @@ def __init__( cal_parameter_name: Optional[str] = "β", auto_update: bool = True, ): - r"""see class :class:`FineDrag` for details. + r"""See class :class:`.FineDrag` for details.
Args: physical_qubits: Sequence containing the qubit for which to run the @@ -180,7 +180,7 @@ def __init__( class FineSXDragCal(FineDragCal): - """Fine drag calibration of X gate.""" + """Fine DRAG calibration of the SX gate.""" def __init__( self, @@ -190,7 +190,7 @@ def __init__( cal_parameter_name: Optional[str] = "β", auto_update: bool = True, ): - r"""see class :class:`FineDrag` for details. + r"""See class :class:`.FineDrag` for details. Args: physical_qubits: Sequence containing the qubit for which to run the diff --git a/qiskit_experiments/library/calibration/fine_frequency_cal.py b/qiskit_experiments/library/calibration/fine_frequency_cal.py index 5fe3251f89..13b1c64b62 100644 --- a/qiskit_experiments/library/calibration/fine_frequency_cal.py +++ b/qiskit_experiments/library/calibration/fine_frequency_cal.py @@ -123,7 +123,7 @@ def update_calibrations(self, experiment_data: ExperimentData): The frequency of the qubit is updated according to - ..math:: + .. math:: f \to f - \frac{{\rm d}\theta}{2\pi\tau{\rm d}t} diff --git a/qiskit_experiments/library/calibration/half_angle_cal.py b/qiskit_experiments/library/calibration/half_angle_cal.py index e56ca7f7df..01735fc797 100644 --- a/qiskit_experiments/library/calibration/half_angle_cal.py +++ b/qiskit_experiments/library/calibration/half_angle_cal.py @@ -130,7 +130,7 @@ def update_calibrations(self, experiment_data: ExperimentData): in the complex amplitude of the pulse. The update rule for the half angle calibration is therefore: - ..math:: + .. math:: A \to A \cdot e^{-i{\rm d}\theta_\text{hac}/2} diff --git a/qiskit_experiments/library/calibration/rough_drag_cal.py b/qiskit_experiments/library/calibration/rough_drag_cal.py index 53c0efcb63..21e055f8c4 100644 --- a/qiskit_experiments/library/calibration/rough_drag_cal.py +++ b/qiskit_experiments/library/calibration/rough_drag_cal.py @@ -102,7 +102,7 @@ def _attach_calibrations(self, circuit: QuantumCircuit): def update_calibrations(self, experiment_data: ExperimentData): """Update the beta using the value directly reported from the fit. - See :class:`DragCalAnalysis` for details on the fit. + See :class:`.DragCalAnalysis` for details on the fit. """ new_beta = BaseUpdater.get_value( diff --git a/qiskit_experiments/library/characterization/analysis/correlated_readout_error_analysis.py b/qiskit_experiments/library/characterization/analysis/correlated_readout_error_analysis.py index 6e9f31b239..73dece5171 100644 --- a/qiskit_experiments/library/characterization/analysis/correlated_readout_error_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/correlated_readout_error_analysis.py @@ -33,7 +33,7 @@ class CorrelatedReadoutErrorAnalysis(BaseAnalysis): :math:`A` is a :math:`2^n\times 2^n` matrix such that :math:`A_{y,x}` is the probability of observing :math:`y` when the true outcome should be :math:`x`. - In the experiment, for each :math:`x`a circuit is constructed whose expected + In the experiment, for each :math:`x` a circuit is constructed whose expected outcome is :math:`x`. From the observed results on the circuit, the probability for each :math:`y` is determined, and :math:`A_{y,x}` is set accordingly.
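
To make the role of :math:`A` concrete, here is a toy reconstruction of a single-qubit assignment matrix from hypothetical counts; it illustrates the definition above and is not the analysis class's actual implementation:

import numpy as np

# Hypothetical measured counts, keyed by the prepared state x.
counts = {
    "0": {"0": 970, "1": 30},  # prepare |0>, mostly observe "0"
    "1": {"0": 80, "1": 920},  # prepare |1>, mostly observe "1"
}

dim = 2
A = np.zeros((dim, dim))
for x_str, outcome_counts in counts.items():
    x = int(x_str, 2)
    shots = sum(outcome_counts.values())
    for y_str, n_y in outcome_counts.items():
        A[int(y_str, 2), x] = n_y / shots  # A[y, x] = P(observe y | prepared x)

print(A)  # each column sums to 1: the outcome distribution for one prepared state
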
diff --git a/qiskit_experiments/library/characterization/analysis/drag_analysis.py b/qiskit_experiments/library/characterization/analysis/drag_analysis.py index 2aae5e1c9d..b6c9915a4f 100644 --- a/qiskit_experiments/library/characterization/analysis/drag_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/drag_analysis.py @@ -104,7 +104,7 @@ def set_options(self, **fields): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -156,8 +156,7 @@ def _generate_fit_guesses( def _run_curve_fit( self, - curve_data: curve.CurveData, - models: List[lmfit.Model], + curve_data: curve.ScatterTable, ) -> curve.CurveFitResult: r"""Perform curve fitting on given data collection and fit models. @@ -187,13 +186,11 @@ def _run_curve_fit( Args: curve_data: Formatted data to fit. - models: A list of LMFIT models that are used to build a cost function - for the LMFIT minimizer. Returns: The best fitting outcome with minimum reduced chi-squared value. """ - fit_result = super()._run_curve_fit(curve_data, models) + fit_result = super()._run_curve_fit(curve_data) if fit_result and fit_result.params is not None: beta = fit_result.params["beta"] @@ -207,7 +204,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three, + - a reduced chi-squared lower than three and greater than zero, - a DRAG parameter value within the first period of the lowest number of repetitions, - an error on the drag beta smaller than the beta. """ @@ -215,7 +212,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: fit_freq = fit_data.ufloat_params["freq"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, abs(fit_beta.nominal_value) < 1 / fit_freq.nominal_value / 2, curve.utils.is_error_not_significant(fit_beta), ] diff --git a/qiskit_experiments/library/characterization/analysis/ramsey_xy_analysis.py b/qiskit_experiments/library/characterization/analysis/ramsey_xy_analysis.py index 12c3c34bfd..cf8d74515b 100644 --- a/qiskit_experiments/library/characterization/analysis/ramsey_xy_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/ramsey_xy_analysis.py @@ -104,7 +104,7 @@ def _default_options(cls): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. @@ -194,13 +194,13 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three, + - a reduced chi-squared lower than three and greater than zero, - an error on the frequency smaller than the frequency. 
""" fit_freq = fit_data.ufloat_params["freq"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(fit_freq), ] @@ -425,7 +425,7 @@ def _default_options(cls): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. diff --git a/qiskit_experiments/library/characterization/analysis/t1_analysis.py b/qiskit_experiments/library/characterization/analysis/t1_analysis.py index cf34a15460..856f346de8 100644 --- a/qiskit_experiments/library/characterization/analysis/t1_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/t1_analysis.py @@ -22,7 +22,6 @@ import qiskit_experiments.curve_analysis as curve import qiskit_experiments.data_processing as dp import qiskit_experiments.visualization as vis -from qiskit_experiments.curve_analysis.curve_data import CurveData from qiskit_experiments.data_processing.exceptions import DataProcessorError from qiskit_experiments.database_service.device_component import Qubit from qiskit_experiments.framework import BaseAnalysis, ExperimentData, AnalysisResultData, Options @@ -48,7 +47,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - absolute amp is within [0.9, 1.1] - base is less than 0.1 - amp error is less than 0.1 @@ -60,7 +59,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: base = fit_data.ufloat_params["base"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, abs(amp.nominal_value - 1.0) < 0.1, abs(base.nominal_value) < 0.1, curve.utils.is_error_not_significant(amp, absolute=0.1), @@ -95,7 +94,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - absolute amp is within [0.9, 1.1] - base is less than 0.1 - amp error is less than 0.1 @@ -107,7 +106,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: base = fit_data.ufloat_params["base"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, abs(amp.nominal_value - 1.0) < 0.1, abs(base.nominal_value) < 0.1, curve.utils.is_error_not_significant(amp, absolute=0.1), @@ -122,8 +121,8 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: def _format_data( self, - curve_data: curve.CurveData, - ) -> curve.CurveData: + curve_data: curve.ScatterTable, + ) -> curve.ScatterTable: """Postprocessing for the processed dataset. Args: @@ -133,20 +132,10 @@ def _format_data( Formatted data. 
""" # check if the SVD decomposition categorized 0 as 1 by calculating the average slope - diff_y = np.diff(unp.nominal_values(curve_data.y), axis=0) + diff_y = np.diff(curve_data.yval) avg_slope = sum(diff_y) / len(diff_y) - if avg_slope[0] > 0: - new_y_data = 1 - curve_data.y - new_curve_data = CurveData( - x=curve_data.x, - y=new_y_data, - y_err=curve_data.y_err, - shots=curve_data.shots, - data_allocation=curve_data.data_allocation, - labels=curve_data.labels, - ) - - return super()._format_data(new_curve_data) + if avg_slope > 0: + curve_data.yval = 1 - curve_data.yval return super()._format_data(curve_data) diff --git a/qiskit_experiments/library/characterization/analysis/t2hahn_analysis.py b/qiskit_experiments/library/characterization/analysis/t2hahn_analysis.py index 62a1345dfa..5099cc030c 100644 --- a/qiskit_experiments/library/characterization/analysis/t2hahn_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/t2hahn_analysis.py @@ -50,7 +50,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - absolute amp is within [0.4, 0.6] - base is less is within [0.4, 0.6] - amp error is less than 0.1 @@ -62,7 +62,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: base = fit_data.ufloat_params["base"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, abs(amp.nominal_value - 0.5) < 0.1, abs(base.nominal_value - 0.5) < 0.1, curve.utils.is_error_not_significant(amp, absolute=0.1), diff --git a/qiskit_experiments/library/characterization/analysis/t2ramsey_analysis.py b/qiskit_experiments/library/characterization/analysis/t2ramsey_analysis.py index db58b80669..8c7e1738d1 100644 --- a/qiskit_experiments/library/characterization/analysis/t2ramsey_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/t2ramsey_analysis.py @@ -41,7 +41,7 @@ def _evaluate_quality(self, fit_data: curve.CurveFitResult) -> Union[str, None]: """Algorithmic criteria for whether the fit is good or bad. 
A good fit has: - - a reduced chi-squared lower than three + - a reduced chi-squared lower than three and greater than zero - relative error of amp is less than 10 percent - relative error of tau is less than 10 percent - relative error of freq is less than 10 percent """ amp = fit_data.ufloat_params["amp"] tau = fit_data.ufloat_params["tau"] freq = fit_data.ufloat_params["freq"] criteria = [ - fit_data.reduced_chisq < 3, + 0 < fit_data.reduced_chisq < 3, curve.utils.is_error_not_significant(amp, fraction=0.1), curve.utils.is_error_not_significant(tau, fraction=0.1), curve.utils.is_error_not_significant(freq, fraction=0.1), diff --git a/qiskit_experiments/library/characterization/analysis/zz_ramsey_analysis.py b/qiskit_experiments/library/characterization/analysis/zz_ramsey_analysis.py index 77de28f7ef..5f5c9770bc 100644 --- a/qiskit_experiments/library/characterization/analysis/zz_ramsey_analysis.py +++ b/qiskit_experiments/library/characterization/analysis/zz_ramsey_analysis.py @@ -21,7 +21,12 @@ from qiskit.providers.options import Options import qiskit_experiments.curve_analysis as curve -from qiskit_experiments.curve_analysis import CurveAnalysis, CurveData, CurveFitResult, FitOptions +from qiskit_experiments.curve_analysis import ( + CurveAnalysis, + ScatterTable, + CurveFitResult, + FitOptions, +) from qiskit_experiments.curve_analysis.utils import is_error_not_significant @@ -121,7 +126,7 @@ def _default_options(cls) -> Options: def _generate_fit_guesses( self, user_opt: FitOptions, - curve_data: CurveData, + curve_data: ScatterTable, ) -> Union[FitOptions, List[FitOptions]]: """Compute the initial guesses. diff --git a/qiskit_experiments/library/characterization/cr_hamiltonian.py b/qiskit_experiments/library/characterization/cr_hamiltonian.py index e655fd2fc5..97378dd2b7 100644 --- a/qiskit_experiments/library/characterization/cr_hamiltonian.py +++ b/qiskit_experiments/library/characterization/cr_hamiltonian.py @@ -122,7 +122,7 @@ class CrossResonanceHamiltonian(BaseExperiment): # section: manual .. ref_website:: Qiskit Textbook 6.7, - https://qiskit.org/textbook/ch-quantum-hardware/hamiltonian-tomography.html + https://github.com/Qiskit/textbook/blob/main/notebooks/quantum-hardware-pulses/hamiltonian-tomography.ipynb """ # Number of CR pulses. The flat top duration per pulse is divided by this number. diff --git a/qiskit_experiments/library/characterization/drag.py b/qiskit_experiments/library/characterization/drag.py index 7e7639fe3b..fb48add906 100644 --- a/qiskit_experiments/library/characterization/drag.py +++ b/qiskit_experiments/library/characterization/drag.py @@ -166,7 +166,7 @@ def circuits(self) -> List[QuantumCircuit]: ) for beta_val in self.experiment_options.betas: - beta_val = np.round(beta_val, decimals=6) + beta_val = float(np.round(beta_val, decimals=6)) assigned_circuit = circuit.assign_parameters({beta: beta_val}, inplace=False) diff --git a/qiskit_experiments/library/characterization/half_angle.py b/qiskit_experiments/library/characterization/half_angle.py index 2b17e68949..4743149512 100644 --- a/qiskit_experiments/library/characterization/half_angle.py +++ b/qiskit_experiments/library/characterization/half_angle.py @@ -43,9 +43,48 @@ class HalfAngle(BaseExperiment): This sequence measures angle errors where the axes of the :code:`sx` and :code:`x` rotations are not parallel.
A similar experiment is described in Ref.~[1] where the gate sequence :code:`x - y` is repeated to amplify errors caused by non-orthogonal - :code:`x` and :code:`y` rotation axes. Such errors can occur due to phase errors. - For example, the non-linearities in the mixer's skew for :math:`\pi/2` pulses may - be different from the :math:`\pi` pulse. + :code:`x` and :code:`y` rotation axes. + + One cause of such errors is non-linearity in the microwave mixer used + to produce the pulses for the ``x`` and ``sx`` gates. Typically, these + gates are calibrated to have the same duration and so have different + pulse amplitudes. Non-linearities in the mixer's skew can cause the + angle to differ for these different pulse amplitudes. + + The way the experiment works is that the initial ``Ry(π/2)`` puts the + qubit close to the :math:`+X` state, with a deviation :math:`δθ`, due + to the misalignment between ``sx`` and ``x`` (``Ry(π/2)`` is + implemented with ``sx`` as described below). The first ``sx - sx`` do + nothing as they should be rotations about the axis the qubit is + pointing along. The first ``y`` then mirrors the qubit about the + :math:`y` axis in the :math:`xy` plane of the Bloch sphere, so the + :math:`δθ` deviation from :math:`+X` becomes a :math:`-δθ` from + :math:`-X`. The next ``sx - sx`` sequence rotates about the axis that + is :math:`+δθ` rotated in the :math:`xy` plane from :math:`+X`, which + takes the deviation from :math:`-X` from :math:`-δθ` to :math:`+3 δθ`. + Then the next ``y`` mirrors this across the :math:`y` axis, taking the + state to :math:`-3 δθ` from :math:`+X`. This pattern continues with + each iteration, with the angular deviation in units of :math:`δθ` + following the sequence 1, 3, 5, 7, 9, etc. from :math:`+X` and + :math:`-X`. The final ``sx`` rotation serves mainly to rotate these + deviations from :math:`+X` and :math:`-X` in the :math:`xy` plane into + deviations out of the :math:`xy` plane, so that they appear as a signal + in the :math:`Z` basis. Because ``sx`` has a :math:`δθ` deviation from + ``x``, the final ``sx`` adds an extra :math:`δθ` to the deviations, so + the pattern ends up as 2, 4, 6, 8, etc., meaning that each iteration + adds :math:`2 δθ` to the deviation from the equator of the Bloch sphere + (with the sign alternating due to the ``y`` gates, so the deviations + are really -2, 4, -6, 8, etc.). + + For the implementation of the circuits, the experiment uses ``Rz(π/2) - + sx - Rz(-π/2)`` to implement the ``Ry(π/2)`` and ``Rz(π/2) - x - + Rz(-π/2)`` to implement the ``y``. So the experiment makes use of only + ``sx``, ``x``, ``Rz(π/2)``, and ``Rz(-π/2)`` gates. For the + experiment's analysis to be valid, it is important that the ``sx`` and + ``x`` gates are not replaced (such as by a transpiler pass that + replaces ``x`` with ``sx - sx``), as it is the angle between them which + is being inferred. It is assumed that the angle between ``x`` and + ``Rz`` is exactly :math:`π/2`. # section: analysis_ref :class:`.ErrorAmplificationAnalysis` @@ -66,18 +105,6 @@ def _default_experiment_options(cls) -> Options: options.repetitions = list(range(15)) return options - @classmethod - def _default_transpile_options(cls) -> Options: - """Default transpile options. - - The basis gates option should not be changed since it will affect the gates and - the pulses that are run on the hardware. 
- """ - options = super()._default_transpile_options() - options.basis_gates = ["sx", "rz", "y"] - options.inst_map = None - return options - def __init__(self, physical_qubits: Sequence[int], backend: Optional[Backend] = None): """Setup a half angle experiment on the given qubit. @@ -126,7 +153,9 @@ def circuits(self) -> List[QuantumCircuit]: for _ in range(repetition): circuit.sx(0) circuit.sx(0) - circuit.y(0) + circuit.rz(np.pi / 2, 0) + circuit.x(0) + circuit.rz(-np.pi / 2, 0) circuit.sx(0) circuit.measure_all() diff --git a/qiskit_experiments/library/characterization/multi_state_discrimination.py b/qiskit_experiments/library/characterization/multi_state_discrimination.py index 14078af813..ffea405a52 100644 --- a/qiskit_experiments/library/characterization/multi_state_discrimination.py +++ b/qiskit_experiments/library/characterization/multi_state_discrimination.py @@ -55,7 +55,7 @@ class MultiStateDiscrimination(BaseExperiment): # section: reference `Qiskit Textbook\ - `_ + `_ """ diff --git a/qiskit_experiments/library/characterization/rabi.py b/qiskit_experiments/library/characterization/rabi.py index 4f102f84d3..9193b22dd5 100644 --- a/qiskit_experiments/library/characterization/rabi.py +++ b/qiskit_experiments/library/characterization/rabi.py @@ -50,8 +50,8 @@ class Rabi(BaseExperiment, RestlessMixin): # section: manual :ref:`Rabi Calibration` - See also `Qiskit Textbook `_ + See also the `Qiskit Textbook + `_ for the pulse level programming of a Rabi experiment. # section: analysis_ref @@ -187,7 +187,7 @@ def _metadata(self): class EFRabi(Rabi): r"""An experiment that scans the amplitude of a pulse inducing rotations on the - :math:`|1\rangle` <-> :math:`|2\rangle` transition. + :math:`|1\rangle` <-> :math:`|2\rangle` transition. # section: overview diff --git a/qiskit_experiments/library/quantum_volume/qv_analysis.py b/qiskit_experiments/library/quantum_volume/qv_analysis.py index d1575309d8..730b4bb020 100644 --- a/qiskit_experiments/library/quantum_volume/qv_analysis.py +++ b/qiskit_experiments/library/quantum_volume/qv_analysis.py @@ -34,7 +34,7 @@ class QuantumVolumeAnalysis(BaseAnalysis): # section: overview Calculate the quantum volume of the analysed system. The quantum volume is determined by the largest successful circuit depth. - A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence + A depth is successful if it has `mean heavy-output probability` > 2/3 with confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran. we assume the error (standard deviation) of the heavy output probability is due to a binomial distribution. The standard deviation for binomial distribution is @@ -175,7 +175,7 @@ def _calc_quantum_volume(self, heavy_output_prob_exp, depth, trials): """ Calc the quantum volume of the analysed system. quantum volume is determined by the largest successful depth. - A depth is successful if it has 'mean heavy-output probability' > 2/3 with confidence + A depth is successful if it has `mean heavy-output probability` > 2/3 with confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran. we assume the error (standard deviation) of the heavy output probability is due to a binomial distribution. 
standard deviation for binomial distribution is sqrt(np(1-p)), @@ -187,7 +187,7 @@ def _calc_quantum_volume(self, heavy_output_prob_exp, depth, trials): whether the results passed the threshold, the confidence of the result, the heavy output probability for each trial, - the mean heavy output probability, + the mean heavy-output probability, the error of the heavy output probability, the depth of the circuit, the number of trials run diff --git a/qiskit_experiments/library/quantum_volume/qv_experiment.py b/qiskit_experiments/library/quantum_volume/qv_experiment.py index 8f1528f157..9e8bea0012 100644 --- a/qiskit_experiments/library/quantum_volume/qv_experiment.py +++ b/qiskit_experiments/library/quantum_volume/qv_experiment.py @@ -40,8 +40,8 @@ class QuantumVolume(BaseExperiment): The Quantum Volume is determined by the largest circuit depth :math:`d_{max}`, and equals :math:`2^{d_{max}}`. - See `Qiskit Textbook - `_ + See the `Qiskit Textbook + `_ for an explanation of the QV protocol. In the QV experiment we generate :class:`~qiskit.circuit.library.QuantumVolume` circuits on @@ -50,7 +50,7 @@ class QuantumVolume(BaseExperiment): Then these circuits run on the quantum backend and on an ideal simulator (either :class:`~qiskit_aer.AerSimulator` or :class:`~qiskit.quantum_info.Statevector`). - A depth :math:`d` QV circuit is successful if it has 'mean heavy-output probability' > 2/3 with + A depth :math:`d` QV circuit is successful if it has `mean heavy-output probability` > 2/3 with confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been run. See :class:`QuantumVolumeAnalysis` documentation for additional diff --git a/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py b/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py index fb5759ac8d..f6ab757b5a 100644 --- a/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py +++ b/qiskit_experiments/library/randomized_benchmarking/clifford_utils.py @@ -17,10 +17,9 @@ import os from functools import lru_cache from numbers import Integral -from typing import Optional, Union, Tuple, Sequence +from typing import Optional, Union, Tuple, Sequence, Iterable import numpy as np -import scipy.sparse from numpy.random import Generator, default_rng from qiskit.circuit import CircuitInstruction, Qubit @@ -36,9 +35,13 @@ _CLIFFORD_COMPOSE_1Q = np.load(f"{_DATA_FOLDER}/clifford_compose_1q.npz")["table"] _CLIFFORD_INVERSE_1Q = np.load(f"{_DATA_FOLDER}/clifford_inverse_1q.npz")["table"] -_CLIFFORD_COMPOSE_2Q = scipy.sparse.load_npz(f"{_DATA_FOLDER}/clifford_compose_2q_sparse.npz") _CLIFFORD_INVERSE_2Q = np.load(f"{_DATA_FOLDER}/clifford_inverse_2q.npz")["table"] +_clifford_compose_2q_data = np.load(f"{_DATA_FOLDER}/clifford_compose_2q_dense_selected.npz") +_CLIFFORD_COMPOSE_2Q_DENSE = _clifford_compose_2q_data["table"] +# valid indices for the columns of the _CLIFFORD_COMPOSE_2Q_DENSE table +_valid_sparse_indices = _clifford_compose_2q_data["valid_sparse_indices"] +# map a clifford number to the index of _CLIFFORD_COMPOSE_2Q_DENSE +_clifford_num_to_dense_index = {idx: ii for ii, idx in enumerate(_valid_sparse_indices)} # Transpilation utilities def _transpile_clifford_circuit( @@ -420,8 +423,9 @@ def compose_2q(lhs: Integral, rhs: Integral) -> Integral: """Return the composition of 2-qubit clifford integers.""" num = lhs for layer, idx in enumerate(_layer_indices_from_num(rhs)): - circ = _CLIFFORD_LAYER[layer][idx] - num = _compose_num_with_circuit_2q(num, circ) + gate_numbers =
_CLIFFORD_LAYER_NUMS[layer][idx] + for n in gate_numbers: + num = _CLIFFORD_COMPOSE_2Q_DENSE[num, _clifford_num_to_dense_index[n]] return num @@ -432,17 +436,9 @@ def inverse_2q(num: Integral) -> Integral: def num_from_2q_circuit(qc: QuantumCircuit) -> Integral: """Convert a given 2-qubit Clifford circuit to the corresponding integer.""" - return _compose_num_with_circuit_2q(0, qc) - - -def _compose_num_with_circuit_2q(num: Integral, qc: QuantumCircuit) -> Integral: - """Compose a number that represents a Clifford, with a Clifford circuit, and return the - number that represents the resulting Clifford.""" - lhs = num - for inst in qc: - qubits = tuple(qc.find_bit(q).index for q in inst.qubits) - rhs = _num_from_2q_gate(op=inst.operation, qubits=qubits) - lhs = _CLIFFORD_COMPOSE_2Q[lhs, rhs] + lhs = 0 + for rhs in _clifford_2q_nums_from_2q_circuit(qc): + lhs = _CLIFFORD_COMPOSE_2Q_DENSE[lhs, _clifford_num_to_dense_index[rhs]] return lhs @@ -568,6 +564,20 @@ def _create_cliff_2q_layer_2(): _NUM_LAYER_2 = 16 +def _clifford_2q_nums_from_2q_circuit(qc: QuantumCircuit) -> Iterable[Integral]: + """Yield Clifford numbers that represent the 2Q Clifford circuit.""" + for inst in qc: + qubits = tuple(qc.find_bit(q).index for q in inst.qubits) + yield _num_from_2q_gate(op=inst.operation, qubits=qubits) + + +# Construct mapping from Clifford layers to series of Clifford numbers _CLIFFORD_LAYER_NUMS = [ + [tuple(_clifford_2q_nums_from_2q_circuit(qc)) for qc in _CLIFFORD_LAYER[layer]] + for layer in [0, 1, 2] ] + + @lru_cache(maxsize=None) def _transformed_clifford_layer( layer: int, index: Integral, basis_gates: Tuple[str, ...] diff --git a/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_1q.npz b/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_1q.npz index 5794227a06..64835c36cb 100644 Binary files a/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_1q.npz and b/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_1q.npz differ diff --git a/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_dense_selected.npz b/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_dense_selected.npz new file mode 100644 index 0000000000..0bce584b2a Binary files /dev/null and b/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_dense_selected.npz differ diff --git a/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_sparse.npz b/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_sparse.npz deleted file mode 100644 index 19439b7be6..0000000000 Binary files a/qiskit_experiments/library/randomized_benchmarking/data/clifford_compose_2q_sparse.npz and /dev/null differ diff --git a/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_1q.npz b/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_1q.npz index dedef02825..a72204ac70 100644 Binary files a/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_1q.npz and b/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_1q.npz differ diff --git a/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_2q.npz b/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_2q.npz index 62a8314f15..a0dde142f0 100644 Binary files a/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_2q.npz and
b/qiskit_experiments/library/randomized_benchmarking/data/clifford_inverse_2q.npz differ diff --git a/qiskit_experiments/library/randomized_benchmarking/data/generate_clifford_data.py b/qiskit_experiments/library/randomized_benchmarking/data/generate_clifford_data.py index 8da50467cb..7ebd45dcab 100644 --- a/qiskit_experiments/library/randomized_benchmarking/data/generate_clifford_data.py +++ b/qiskit_experiments/library/randomized_benchmarking/data/generate_clifford_data.py @@ -16,8 +16,6 @@ The script relies on the values of ``_CLIFF_SINGLE_GATE_MAP_2Q`` in :mod:`~qiskit_experiment.library.randomized_benchmarking.clifford_utils` so they must be set correctly before running the script. - -Note: Terra >= 0.22 is required to run this script. """ import itertools @@ -109,7 +107,29 @@ def gen_clifford_compose_2q_gate(): for rhs in _CLIFF_SINGLE_GATE_MAP_2Q.values(): composed = cliff_lhs.compose(_CLIFF_2Q[rhs]) products[lhs, rhs] = _TO_INT_2Q[_hash_cliff(composed)] - return products.tocsr() + return products.tocsc() + + +def gen_clifford_compose_2q_dense() -> tuple[np.typing.NDArray[int], list[int]]: + """Generate a dense multiplication table for 2-qubit Clifford numbers. + + The multiplication table is built from the sparse table generated by :meth:`gen_clifford_compose_2q_gate`. + Each column contains the full set of Clifford numbers. Each row contains a subset of Clifford multiplications + corresponding to the values of entries of ``_CLIFF_SINGLE_GATE_MAP_2Q``. + + Returns: + Tuple with a dense multiplication table and the valid indices for the columns + """ + _CLIFFORD_COMPOSE_2Q = gen_clifford_compose_2q_gate() + number_of_cliffords = _CLIFFORD_COMPOSE_2Q.shape[0] + valid_sparse_indices = [ + num + for num in range(number_of_cliffords) + if _CLIFFORD_COMPOSE_2Q[:, num].nnz == number_of_cliffords - 1 + ] + _CLIFFORD_COMPOSE_2Q_DENSE = _CLIFFORD_COMPOSE_2Q[:, valid_sparse_indices].toarray() + + return _CLIFFORD_COMPOSE_2Q_DENSE, valid_sparse_indices _GATE_LIST_1Q = [ @@ -184,4 +204,11 @@ def gen_cliff_single_2q_gate_map(): "_CLIFF_SINGLE_GATE_MAP_2Q must be generated by gen_cliff_single_2q_gate_map()" ) np.savez_compressed("clifford_inverse_2q.npz", table=gen_clifford_inverse_2q()) - scipy.sparse.save_npz("clifford_compose_2q_sparse.npz", gen_clifford_compose_2q_gate()) + + _CLIFFORD_COMPOSE_2Q_DENSE, valid_sparse_indices = gen_clifford_compose_2q_dense() + + np.savez_compressed( + "clifford_compose_2q_dense_selected.npz", + table=_CLIFFORD_COMPOSE_2Q_DENSE, + valid_sparse_indices=valid_sparse_indices, + ) diff --git a/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py b/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py index 266ea0703b..7864b20436 100644 --- a/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py +++ b/qiskit_experiments/library/randomized_benchmarking/interleaved_rb_analysis.py @@ -120,7 +120,7 @@ def _default_options(cls): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data.
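
The dense-table pattern introduced above in ``clifford_utils.py`` and ``generate_clifford_data.py`` keeps a full-height composition table only for the tabulated right-hand factors and remaps those factors to column indices. A toy sketch of the same lookup structure, with Z/4Z under addition standing in for the 2-qubit Clifford group (the names mirror the patch; the values do not):

import numpy as np

group_size = 4
valid_sparse_indices = [1, 3]  # pretend only these right-hand factors are tabulated

# dense_table[lhs, j] = lhs composed with valid_sparse_indices[j] (here: addition mod 4)
dense_table = np.array(
    [[(lhs + rhs) % group_size for rhs in valid_sparse_indices] for lhs in range(group_size)]
)
num_to_dense_index = {num: j for j, num in enumerate(valid_sparse_indices)}

def compose(lhs: int, rhs: int) -> int:
    # rhs must be one of the tabulated factors, as in compose_2q above
    return int(dense_table[lhs, num_to_dense_index[rhs]])

assert compose(2, 3) == 1  # (2 + 3) % 4
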
diff --git a/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py b/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py index 28ba5c7e23..6670934c94 100644 --- a/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py +++ b/qiskit_experiments/library/randomized_benchmarking/rb_analysis.py @@ -110,7 +110,7 @@ def _default_options(cls): def _generate_fit_guesses( self, user_opt: curve.FitOptions, - curve_data: curve.CurveData, + curve_data: curve.ScatterTable, ) -> Union[curve.FitOptions, List[curve.FitOptions]]: """Create algorithmic initial fit guess from analysis options and curve data. diff --git a/qiskit_experiments/library/randomized_benchmarking/standard_rb.py b/qiskit_experiments/library/randomized_benchmarking/standard_rb.py index 3da209f07a..e87091fd3f 100644 --- a/qiskit_experiments/library/randomized_benchmarking/standard_rb.py +++ b/qiskit_experiments/library/randomized_benchmarking/standard_rb.py @@ -22,7 +22,7 @@ from numpy.random import Generator, default_rng from numpy.random.bit_generator import BitGenerator, SeedSequence -from qiskit.circuit import QuantumCircuit, Instruction, Barrier +from qiskit.circuit import CircuitInstruction, QuantumCircuit, Instruction, Barrier from qiskit.exceptions import QiskitError from qiskit.providers import BackendV2Converter from qiskit.providers.backend import Backend, BackendV1, BackendV2 @@ -60,7 +60,7 @@ class StandardRB(BaseExperiment, RestlessMixin): Randomized Benchmarking (RB) is an efficient and robust method for estimating the average error rate of a set of quantum gate operations. See `Qiskit Textbook - `_ + `_ for an explanation on the RB method. A standard RB experiment generates sequences of random Cliffords @@ -241,6 +241,9 @@ def _get_basis_gates(self) -> Optional[Tuple[str, ...]]: return tuple(sorted(basis_gates)) if basis_gates else None def is_bidirectional(coupling_map): + if coupling_map is None: + # None for a coupling map implies all-to-all coupling + return True return len(coupling_map.reduce(self.physical_qubits).get_edges()) == 2 # 2 qubits case: Return all basis gates except for one-way directed 2q-gates. @@ -288,7 +291,7 @@ def _sequences_to_circuits( circ = QuantumCircuit(self.num_qubits) for elem in seq: circ.append(self._to_instruction(elem, basis_gates), circ.qubits) - circ.append(Barrier(self.num_qubits), circ.qubits) + circ._append(CircuitInstruction(Barrier(self.num_qubits), circ.qubits)) # Compute inverse, compute only the difference from the previous shorter sequence prev_elem = self.__compose_clifford_seq(prev_elem, seq[len(prev_seq) :]) diff --git a/qiskit_experiments/library/tomography/basis/base_basis.py b/qiskit_experiments/library/tomography/basis/base_basis.py index c116b713b2..59aa14ad85 100644 --- a/qiskit_experiments/library/tomography/basis/base_basis.py +++ b/qiskit_experiments/library/tomography/basis/base_basis.py @@ -100,7 +100,7 @@ class PreparationBasis(BaseBasis): @abstractmethod def matrix_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]: - """Return the shape of subsystem dimensions of the state attr:`~matrix`. + """Return the shape of subsystem dimensions of the state :attr:`~matrix`. Args: qubits: the physical qubit subsystems. @@ -170,7 +170,7 @@ def outcome_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]: @abstractmethod def matrix_shape(self, qubits: Sequence[int]) -> Tuple[int, ...]: - """Return the shape of subsystem dimensions of a POVM attr:`~matrix`. + """Return the shape of subsystem dimensions of a POVM :attr:`~matrix`. 
Args: qubits: the physical qubit subsystems. diff --git a/qiskit_experiments/library/tomography/fitters/cvxpy_utils.py b/qiskit_experiments/library/tomography/fitters/cvxpy_utils.py index af2bc3c0c3..0f544e4b14 100644 --- a/qiskit_experiments/library/tomography/fitters/cvxpy_utils.py +++ b/qiskit_experiments/library/tomography/fitters/cvxpy_utils.py @@ -64,7 +64,9 @@ def decorated_func(*args, **kwargs): return decorated_func -def solve_iteratively(problem: Problem, initial_iters: int, scale: int = 2, **solve_kwargs) -> None: +def solve_iteratively( + problem: Problem, initial_iters: int, scale: int = 2, solver: str = "SCS", **solve_kwargs +) -> None: """Solve a CVXPY problem increasing iterations if solution is inaccurate. If the problem is not solved with the ``initial_iters`` value of @@ -79,6 +81,7 @@ def solve_iteratively(problem: Problem, initial_iters: int, scale: int = 2, **so when solving the problem scale: Scale factor for increasing the initial_iters up to max_iters at each step (Default: 2). + solver: The solver to use. Defaults to the Splitting Conic Solver. solve_kwargs: kwargs for problem.solve method. Raises: @@ -90,7 +93,7 @@ def solve_iteratively(problem: Problem, initial_iters: int, scale: int = 2, **so problem_solved = False while not problem_solved: solve_kwargs["max_iters"] = current_max_iters - problem.solve(**solve_kwargs) + problem.solve(solver=solver, **solve_kwargs) if problem.status in ["optimal_inaccurate", "optimal"]: problem_solved = True elif problem.status == "unbounded_inaccurate": diff --git a/qiskit_experiments/library/tomography/mit_qpt_experiment.py b/qiskit_experiments/library/tomography/mit_qpt_experiment.py index bc7d859137..bdebf34bd0 100644 --- a/qiskit_experiments/library/tomography/mit_qpt_experiment.py +++ b/qiskit_experiments/library/tomography/mit_qpt_experiment.py @@ -78,7 +78,7 @@ def __init__( basis_indices: Optional, a list of basis indices for generating partial tomography measurement data. Each item should be given as a pair of lists of preparation and measurement basis configurations - ``([p[0], p[1], ..], m[0], m[1], ...])``, where ``p[i]`` is the + ``([p[0], p[1], ...], [m[0], m[1], ...])``, where ``p[i]`` is the preparation basis index, and ``m[i]`` is the measurement basis index for qubit-i. If not specified full tomography for all indices of the preparation and measurement bases will be performed. diff --git a/qiskit_experiments/library/tomography/qpt_experiment.py b/qiskit_experiments/library/tomography/qpt_experiment.py index fb253a8a06..17d7d7347b 100644 --- a/qiskit_experiments/library/tomography/qpt_experiment.py +++ b/qiskit_experiments/library/tomography/qpt_experiment.py @@ -81,7 +81,7 @@ def __init__( basis_indices: Optional, a list of basis indices for generating partial tomography measurement data. Each item should be given as a pair of lists of preparation and measurement basis configurations - ``([p[0], p[1], ..], m[0], m[1], ...])``, where ``p[i]`` is the + ``([p[0], p[1], ...], [m[0], m[1], ...])``, where ``p[i]`` is the preparation basis index, and ``m[i]`` is the measurement basis index for qubit-i. If not specified full tomography for all indices of the preparation and measurement bases will be performed. 
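
The retry loop in ``solve_iteratively`` above amounts to re-solving with a geometrically growing iteration budget until the solver reports an acceptable status. A solver-agnostic skeleton of that pattern (the callable, budget cap, and status strings are placeholders, not the module's API):

def solve_with_growing_budget(solve_once, initial_iters: int, max_iters: int, scale: int = 2) -> str:
    # Re-run solve_once with a growing iteration cap until it reports success.
    current = initial_iters
    while True:
        status = solve_once(max_iters=current)
        if status in ("optimal", "optimal_inaccurate"):
            return status
        if current >= max_iters:
            raise RuntimeError("No accurate solution within the iteration budget.")
        current = min(current * scale, max_iters)

# A fake solver that only converges once the budget reaches 400 iterations.
attempts = []
def fake_solve(max_iters: int) -> str:
    attempts.append(max_iters)
    return "optimal" if max_iters >= 400 else "unbounded_inaccurate"

solve_with_growing_budget(fake_solve, initial_iters=100, max_iters=1000)
print(attempts)  # [100, 200, 400]
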
diff --git a/qiskit_experiments/library/tomography/tomography_experiment.py b/qiskit_experiments/library/tomography/tomography_experiment.py index b873834bb6..bced816a49 100644 --- a/qiskit_experiments/library/tomography/tomography_experiment.py +++ b/qiskit_experiments/library/tomography/tomography_experiment.py @@ -207,7 +207,6 @@ def circuits(self): if prep_element: # Add tomography preparation prep_circ = self._prep_circ_basis.circuit(prep_element, self._prep_physical_qubits) - circ.reset(self._prep_indices) circ.compose(prep_circ, self._prep_indices, inplace=True) circ.barrier(*self._prep_indices) diff --git a/qiskit_experiments/test/__init__.py b/qiskit_experiments/test/__init__.py index 609dbdec96..41df7bfee5 100644 --- a/qiskit_experiments/test/__init__.py +++ b/qiskit_experiments/test/__init__.py @@ -37,7 +37,6 @@ MockIQParallelBackend T2HahnBackend NoisyDelayAerBackend - SingleTransmonTestBackend Helpers ======= diff --git a/qiskit_experiments/test/fake_backend.py b/qiskit_experiments/test/fake_backend.py index 07bab78a78..d9f4772350 100644 --- a/qiskit_experiments/test/fake_backend.py +++ b/qiskit_experiments/test/fake_backend.py @@ -14,6 +14,7 @@ import uuid from qiskit.circuit.library import Measure from qiskit.providers.backend import BackendV2 +from qiskit.providers.fake_provider import FakeProvider from qiskit.providers.options import Options from qiskit.transpiler import Target @@ -27,8 +28,14 @@ class FakeBackend(BackendV2): Fake backend for test purposes only. """ - def __init__(self, backend_name="fake_backend", num_qubits=1, max_experiments=100): - super().__init__(name=backend_name) + def __init__( + self, + provider=FakeProvider(), + backend_name="fake_backend", + num_qubits=1, + max_experiments=100, + ): + super().__init__(provider=provider, name=backend_name) self._target = Target(num_qubits=num_qubits) # Add a measure for each qubit so a simple measure circuit works self.target.add_instruction(Measure()) diff --git a/qiskit_experiments/test/fake_service.py b/qiskit_experiments/test/fake_service.py index 2034380842..5029193921 100644 --- a/qiskit_experiments/test/fake_service.py +++ b/qiskit_experiments/test/fake_service.py @@ -12,12 +12,12 @@ """Fake service class for tests.""" -from typing import Optional, List, Dict, Type, Any, Union, Tuple, Callable -import functools +from typing import Optional, List, Dict, Type, Any, Union, Tuple import json from datetime import datetime, timedelta import uuid +import pandas as pd from qiskit_ibm_experiment import AnalysisResultData from qiskit_experiments.test.fake_backend import FakeBackend @@ -28,41 +28,6 @@ ) -# Check if PANDAS package is installed -try: - import pandas as pd - - HAS_PANDAS = True -except ImportError: - pd = None - HAS_PANDAS = False - - -def requires_pandas(func: Callable) -> Callable: - """Function decorator for functions requiring Pandas. - - Args: - func: a function requiring Pandas. - - Returns: - The decorated function. - - Raises: - QiskitError: If Pandas is not installed. - """ - - @functools.wraps(func) - def decorated_func(*args, **kwargs): - if not HAS_PANDAS: - raise ImportError( - f"The pandas python package is required for {func}." - "You can install it with 'pip install pandas'." - ) - return func(*args, **kwargs) - - return decorated_func - - class FakeService: """ This extremely simple database is designated for testing and as a playground for developers. @@ -71,7 +36,6 @@ class FakeService: It implements most of the methods of `DatabaseService`. 
""" - @requires_pandas def __init__(self): self.exps = pd.DataFrame( columns=[ diff --git a/qiskit_experiments/test/mock_iq_backend.py b/qiskit_experiments/test/mock_iq_backend.py index bd936d7820..09094db37a 100644 --- a/qiskit_experiments/test/mock_iq_backend.py +++ b/qiskit_experiments/test/mock_iq_backend.py @@ -229,7 +229,8 @@ def __init__( Args: experiment_helper(MockIQExperimentHelper): Experiment helper class that contains - 'compute_probabilities' function and 'iq_phase' function for the backend to execute. + :meth:`~MockIQExperimentHelper.compute_probabilities` and + :meth:`~MockIQExperimentHelper.iq_phase` methods for the backend to execute. rng_seed(int): The random seed value. """ diff --git a/qiskit_experiments/test/mock_iq_helpers.py b/qiskit_experiments/test/mock_iq_helpers.py index f583080940..7f093cde95 100644 --- a/qiskit_experiments/test/mock_iq_helpers.py +++ b/qiskit_experiments/test/mock_iq_helpers.py @@ -44,9 +44,9 @@ def __init__( :class:`MockIQBackend` instance. These are used by :meth:`iq_clusters` by default. Subclasses can override :meth:`iq_clusters` to return a modified version of :attr:`iq_cluster_centers` and :attr:`iq_cluster_width`. - `iq_cluster_centers` is a list of tuples. For a given qubit ``i_qbt`` and + :attr:`iq_cluster_centers` is a list of tuples. For a given qubit ``i_qbt`` and computational state ``i_state`` (either `0` or `1`), the centers of the IQ - clusters are found by indexing ``iq_cluster_centers`` as follows: + clusters are found by indexing :attr:`iq_cluster_centers` as follows: .. code-block:: python @@ -69,7 +69,7 @@ def __init__( are different centers for different logical values of the qubit. Defaults to a single qubit with clusters in quadrants 1 and 3. iq_cluster_width: A list of standard deviation values for the sampling of each qubit. - Defaults to widths of 1.0 for each qubit in `iq_cluster_centers`. + Defaults to widths of 1.0 for each qubit in :attr:`iq_cluster_centers`. """ self._iq_cluster_centers = ( @@ -192,15 +192,15 @@ def iq_clusters( Subclasses can override this function to modify the centers and widths of IQ clusters based on the circuits being simulated by a :class:`MockIQBackend`. The base centers and widths are stored internally within the helper object, and can be set in :meth:`__init__` or by modifying - attr:`iq_cluster_centers` and attr:`iq_cluster_width`. The default behaviour for + :attr:`iq_cluster_centers` and :attr:`iq_cluster_width`. The default behavior for :meth:`iq_clusters` is to return the centers and widths unmodified for each circuit in - `circuits`. Subclasses may return different centers and widths based on the circuits provided. + ``circuits``. Subclasses may return different centers and widths based on the circuits provided. The returned list contains a tuple per circuit. Each tuple contains the IQ centers and widths in - the same format as attr:`iq_cluster_centers` and attr:`iq_cluster_width`, passed as + the same format as :attr:`iq_cluster_centers` and :attr:`iq_cluster_width`, passed as arguments to :meth:`__init__`. The format of the centers and widths lists, in the argument - list and in the returned tuples, must match the format of `iq_cluster_centers` and - `iq_cluster_width` in :func:`qiskit_experiments.test.MockIQExperimentHelper.__init__`. + list and in the returned tuples, must match the format of :attr:`iq_cluster_centers` and + :attr:`iq_cluster_width` in :func:`qiskit_experiments.test.MockIQExperimentHelper.__init__`. 
Args: circuits: The quantum circuits for which the clusters should be modified. @@ -224,8 +224,8 @@ def __init__( Parallel Experiment Helper initializer. The class assumes `exp_helper_list` is ordered to match the corresponding experiment in `exp_list`. - Note that :meth:`__init__` does not have `iq_cluster_centers` and `iq_cluster_width` as in - :func:`MockIQExperimentHelper.__init__`. This is because the centers and widths for + Note that :meth:`__init__` does not have :attr:`iq_cluster_centers` and :attr:`iq_cluster_width` + as in :func:`MockIQExperimentHelper.__init__`. This is because the centers and widths for :class:`MockIQParallelBackend` are stored in multiple experiment helpers in the list `exp_helper_list`. diff --git a/qiskit_experiments/test/pulse_backend.py b/qiskit_experiments/test/pulse_backend.py index fc886b9209..4d3473ebcf 100644 --- a/qiskit_experiments/test/pulse_backend.py +++ b/qiskit_experiments/test/pulse_backend.py @@ -72,6 +72,8 @@ def __init__( dt: float = 0.1 * 1e-9, solver_method="RK23", seed: int = 0, + atol: float = None, + rtol: float = None, **kwargs, ): """Initialize a backend with model information. @@ -85,6 +87,8 @@ def __init__( methods. Defaults to "RK23". seed: An optional seed given to the random number generator. If this argument is not set then the seed defaults to 0. + atol: Absolute tolerance during solving. + rtol: Relative tolerance during solving. """ from qiskit_dynamics import Solver @@ -109,6 +113,12 @@ def __init__( self.solver_method = solver_method + self.solve_kwargs = {} + if atol: + self.solve_kwargs["atol"] = atol + if rtol: + self.solve_kwargs["rtol"] = rtol + self.static_hamiltonian = static_hamiltonian self.hamiltonian_operators = hamiltonian_operators self.static_dissipators = static_dissipators @@ -390,6 +400,7 @@ def solve(self, schedule: Union[ScheduleBlock, Schedule]) -> np.ndarray: t_eval=[time_f], signals=signal, method=self.solver_method, + **self.solve_kwargs, ).y[0] return unitary @@ -497,7 +508,7 @@ class SingleTransmonTestBackend(PulseBackend): H = \hbar \sum_{j=1,2} \left[\omega_j |j\rangle\langle j| + \mathcal{E}(t) \lambda_j (\sigma_j^+ + \sigma_j^-)\right] - Here, :math:`\omega_j` is the transition frequency from level :math`0` to level + Here, :math:`\omega_j` is the transition frequency from level :math:`0` to level :math:`j`. :math:`\mathcal{E}(t)` is the drive field and :math:`\sigma_j^\pm` are the raising and lowering operators between levels :math:`j-1` and :math:`j`. """ @@ -510,6 +521,8 @@ def __init__( lambda_2: float = 0.8e9, gamma_1: float = 1e4, noise: bool = True, + atol: float = None, + rtol: float = None, **kwargs, ): """Initialise backend with hamiltonian parameters @@ -522,6 +535,8 @@ def __init__( gamma_1: Relaxation rate (1/T1) for 1-0. Defaults to 1e4. noise: Defaults to True. If True then T1 dissipation is included in the pulse-simulation. The strength is given by ``gamma_1``. + atol: Absolute tolerance during solving. + rtol: Relative tolerance during solving. 
""" from qiskit_dynamics.pulse import InstructionToSignals @@ -565,6 +580,8 @@ def __init__( rwa_cutoff_freq=1.9 * qubit_frequency, rwa_carrier_freqs=[qubit_frequency], evaluation_mode=evaluation_mode, + atol=atol, + rtol=rtol, **kwargs, ) diff --git a/qiskit_experiments/visualization/drawers/base_drawer.py b/qiskit_experiments/visualization/drawers/base_drawer.py index 69f7915e8b..ea63e0afd2 100644 --- a/qiskit_experiments/visualization/drawers/base_drawer.py +++ b/qiskit_experiments/visualization/drawers/base_drawer.py @@ -194,27 +194,38 @@ def _default_figure_options(cls) -> Options: there are multiple columns in the canvas, this could be a list of labels. ylabel (Union[str, List[str]]): Y-axis label string of the output figure. If there are multiple rows in the canvas, this could be a list of labels. - xlim (Tuple[float, float]): Min and max value of the horizontal axis. If not - provided, it is automatically scaled based on the input data points. - ylim (Tuple[float, float]): Min and max value of the vertical axis. If not - provided, it is automatically scaled based on the input data points. - xval_unit (str): Unit of x values. No scaling prefix is needed here as this - is controlled by ``xval_unit_scale``. - yval_unit (str): Unit of y values. See ``xval_unit`` for details. - xval_unit_scale (bool): Whether to add an SI unit prefix to ``xval_unit`` if - needed. For example, when the x values represent time and + xlim (Union[Tuple[float, float], List[Tuple[float, float]]): Min and max value + of the horizontal axis. If not provided, it is automatically scaled based + on the input data points. If there are multiple columns in the canvas, + this could be a list of xlims. + ylim (Union[Tuple[float, float], List[Tuple[float, float]]): Min and max value + of the vertical axis. If not provided, it is automatically scaled based + on the input data points. If there are multiple rows in the canvas, + this could be a list of ylims. + xval_unit (Union[str, List[str]]): Unit of x values. + No scaling prefix is needed here as this is controlled by ``xval_unit_scale``. + If there are multiple columns in the canvas, this could be a list of xval_units. + yval_unit (Union[str, List[str]]): Unit of y values. + No scaling prefix is needed here as this is controlled by ``yval_unit_scale``. + If there are multiple rows in the canvas, this could be a list of yval_units. + xval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to + ``xval_unit`` if needed. For example, when the x values represent time and ``xval_unit="s"``, ``xval_unit_scale=True`` adds an SI unit prefix to ``"s"`` based on X values of plotted data. In the output figure, the prefix is automatically selected based on the maximum value in this axis. If your x values are in [1e-3, 1e-4], they are displayed as [1 ms, 10 ms]. By default, this option is set to ``True``. If ``False`` is provided, the axis numbers will be displayed in the scientific notation. - yval_unit_scale (bool): Whether to add an SI unit prefix to ``yval_unit`` if - needed. See ``xval_unit_scale`` for details. + If there are multiple columns in the canvas, this could be a list of xval_unit_scale. + yval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to + ``yval_unit`` if needed. See ``xval_unit_scale`` for details. + If there are multiple rows in the canvas, this could be a list of yval_unit_scale. xscale (str): The scaling of the x-axis, such as ``log`` or ``linear``. - yscale (str): See ``xscale`` for details. 
+ yscale (str): The scaling of the y-axis, such as ``log`` or ``linear``. figure_title (str): Title of the figure. Defaults to None, i.e. nothing is shown. + sharex (bool): Set True to share x-axis ticks among sub-plots. + sharey (bool): Set True to share y-axis ticks among sub-plots. series_params (Dict[str, Dict[str, Any]]): A dictionary of parameters for each series. This is keyed on the name for each series. Sub-dictionary is expected to have the following three configurations, "canvas", @@ -226,7 +237,7 @@ def _default_figure_options(cls) -> Options: overwrites style parameters in ``default_style`` in :attr:`options`. Defaults to an empty PlotStyle instance (i.e., ``PlotStyle()``). """ - return Options( + options = Options( xlabel=None, ylabel=None, xlim=None, @@ -237,10 +248,16 @@ yval_unit_scale=True, xscale=None, yscale=None, + sharex=True, + sharey=True, figure_title=None, series_params={}, custom_style=PlotStyle(), ) + options.set_validator("xscale", ["linear", "log", "symlog", "logit", "quadratic", None]) + options.set_validator("yscale", ["linear", "log", "symlog", "logit", "quadratic", None]) + + return options def set_options(self, **fields): """Set the drawer options. diff --git a/qiskit_experiments/visualization/drawers/mpl_drawer.py b/qiskit_experiments/visualization/drawers/mpl_drawer.py index 5ea0e4b75c..8ddf696919 100644 --- a/qiskit_experiments/visualization/drawers/mpl_drawer.py +++ b/qiskit_experiments/visualization/drawers/mpl_drawer.py @@ -12,6 +12,7 @@ """Curve drawer for matplotlib backend.""" +import numbers from typing import Any, Dict, List, Optional, Sequence, Tuple, Union import numpy as np @@ -79,6 +80,9 @@ def initialize_canvas(self): else: axis = self.options.axis + sharex = self.figure_options.sharex + sharey = self.figure_options.sharey + n_rows, n_cols = self.options.subplots n_subplots = n_cols * n_rows if n_subplots > 1: @@ -99,9 +103,9 @@ inset_ax_h, ] sub_ax = axis.inset_axes(bounds, transform=axis.transAxes, zorder=1) - if j != 0: + if j != 0 and sharey: # remove y axis except for most-left plot - sub_ax.set_yticklabels([]) + sub_ax.yaxis.set_tick_params(labelleft=False) else: # this axis locates at left, write y-label if self.figure_options.ylabel: @@ -110,9 +114,9 @@ # Y label can be given as a list for each sub axis label = label[i] sub_ax.set_ylabel(label, fontsize=self.style["axis_label_size"]) - if i != n_rows - 1: + if i != n_rows - 1 and sharex: # remove x axis except for most-bottom plot - sub_ax.set_xticklabels([]) + sub_ax.xaxis.set_tick_params(labelbottom=False) else: # this axis locates at bottom, write x-label if self.figure_options.xlabel: @@ -143,115 +147,126 @@ def format_canvas(self): else: all_axes = [self._axis] - # Add data labels if there are multiple labels registered per sub_ax.
- for sub_ax in all_axes: + # Get axis formatter from drawing options + formatter_opts = {} + for ax_type in ("x", "y"): + limit = self.figure_options.get(f"{ax_type}lim") + unit = self.figure_options.get(f"{ax_type}val_unit") + unit_scale = self.figure_options.get(f"{ax_type}val_unit_scale") + + # Format options to a list for each axis + if limit is None or isinstance(limit[0], numbers.Number): + limit = [limit] * len(all_axes) + if unit is None or isinstance(unit, str): + unit = [unit] * len(all_axes) + if isinstance(unit_scale, bool): + unit_scale = [unit_scale] * len(all_axes) + + # Compute min-max value for auto scaling + min_vals = [] + max_vals = [] + for sub_ax in all_axes: + if ax_type == "x": + min_v, max_v = sub_ax.get_xlim() + else: + min_v, max_v = sub_ax.get_ylim() + min_vals.append(min_v) + max_vals.append(max_v) + + formatter_opts[ax_type] = { + "limit": limit, + "unit": unit, + "unit_scale": unit_scale, + "min_ax_vals": min_vals, + "max_ax_vals": max_vals, + } + + def signed_sqrt(x): + return np.sign(x) * np.sqrt(abs(x)) + + def signed_square(x): + return np.sign(x) * x**2 + + for i, sub_ax in enumerate(all_axes): + # Add data labels if there are multiple labels registered per sub_ax. _, labels = sub_ax.get_legend_handles_labels() if len(labels) > 1: sub_ax.legend(loc=self.style["legend_loc"]) - # Format x and y axis - for ax_type in ("x", "y"): - # Get axis formatter from drawing options - if ax_type == "x": - lim = self.figure_options.xlim - unit = self.figure_options.xval_unit - unit_scale = self.figure_options.xval_unit_scale - else: - lim = self.figure_options.ylim - unit = self.figure_options.yval_unit - unit_scale = self.figure_options.yval_unit_scale - - # Compute data range from auto scale - if not lim: - v0 = np.nan - v1 = np.nan - for sub_ax in all_axes: - if ax_type == "x": - this_v0, this_v1 = sub_ax.get_xlim() - else: - this_v0, this_v1 = sub_ax.get_ylim() - v0 = np.nanmin([v0, this_v0]) - v1 = np.nanmax([v1, this_v1]) - lim = (v0, v1) - - # Format scaling (log, quadratic, linear, etc.) - def signed_sqrt(x): - return np.sign(x) * np.sqrt(abs(x)) - - def signed_square(x): - return np.sign(x) * x**2 + for ax_type in ("x", "y"): + limit = formatter_opts[ax_type]["limit"][i] + unit = formatter_opts[ax_type]["unit"][i] + unit_scale = formatter_opts[ax_type]["unit_scale"][i] + scale = self.figure_options.get(f"{ax_type}scale") + min_ax_vals = formatter_opts[ax_type]["min_ax_vals"] + max_ax_vals = formatter_opts[ax_type]["max_ax_vals"] + share_axis = self.figure_options.get(f"share{ax_type}") - for sub_ax in all_axes: if ax_type == "x": - scale_opt = self.figure_options.xscale - scale_function = sub_ax.set_xscale + mpl_setscale = sub_ax.set_xscale + mpl_axis_obj = getattr(sub_ax, "xaxis") + mpl_setlimit = sub_ax.set_xlim + mpl_share = sub_ax.sharex else: - scale_opt = self.figure_options.yscale - scale_function = sub_ax.set_yscale - - if scale_opt is not None: - if scale_opt == "quadratic": - scale_function("function", functions=(signed_square, signed_sqrt)) + mpl_setscale = sub_ax.set_yscale + mpl_axis_obj = getattr(sub_ax, "yaxis") + mpl_setlimit = sub_ax.set_ylim + mpl_share = sub_ax.sharey + + if limit is None: + if share_axis: + limit = min(min_ax_vals), max(max_ax_vals) else: - scale_function(scale_opt) - - # Format axis number notation - if unit and unit_scale: - # If value is specified, automatically scale axis magnitude - # and write prefix to axis label, i.e. 
1e3 Hz -> 1 kHz - maxv = max(np.abs(lim[0]), np.abs(lim[1])) - try: - scaled_maxv, prefix = detach_prefix(maxv, decimal=3) - prefactor = scaled_maxv / maxv - except ValueError: - prefix = "" - prefactor = 1 - - formatter = MplDrawer.PrefixFormatter(prefactor) - units_str = f" [{prefix}{unit}]" - else: - # Use scientific notation with 3 digits, 1000 -> 1e3 - formatter = ScalarFormatter() - formatter.set_scientific(True) - formatter.set_powerlimits((-3, 3)) - - units_str = f" [{unit}]" if unit else "" + limit = min_ax_vals[i], max_ax_vals[i] - for sub_ax in all_axes: - if ax_type == "x": - ax = getattr(sub_ax, "xaxis") - tick_labels = sub_ax.get_xticklabels() + # Apply non linear axis spacing + if scale is not None: + if scale == "quadratic": + mpl_setscale("function", functions=(signed_square, signed_sqrt)) + else: + mpl_setscale(scale) + + # Create formatter for axis tick label notation + if unit and unit_scale: + # If value is specified, automatically scale axis magnitude + # and write prefix to axis label, i.e. 1e3 Hz -> 1 kHz + maxv = max(np.abs(limit[0]), np.abs(limit[1])) + try: + scaled_maxv, prefix = detach_prefix(maxv, decimal=3) + prefactor = scaled_maxv / maxv + except ValueError: + prefix = "" + prefactor = 1 + formatter = MplDrawer.PrefixFormatter(prefactor) + units_str = f" [{prefix}{unit}]" else: - ax = getattr(sub_ax, "yaxis") - tick_labels = sub_ax.get_yticklabels() - - if tick_labels: - # Set formatter only when tick labels exist - ax.set_major_formatter(formatter) + # Use scientific notation with 3 digits, 1000 -> 1e3 + formatter = ScalarFormatter() + formatter.set_scientific(True) + formatter.set_powerlimits((-3, 3)) + units_str = f" [{unit}]" if unit else "" + mpl_axis_obj.set_major_formatter(formatter) + + # Add units to axis label if both exist if units_str: - # Add units to label if both exist - label_txt_obj = ax.get_label() + label_txt_obj = mpl_axis_obj.get_label() label_str = label_txt_obj.get_text() if label_str: label_txt_obj.set_text(label_str + units_str) - # Auto-scale all axes to the first sub axis - if ax_type == "x": - # get_shared_y_axes() is immutable from matplotlib>=3.6.0. Must use Axis.sharey() - # instead, but this can only be called once per axis. Here we call sharey on all axes in - # a chain, which should have the same effect. - if len(all_axes) > 1: - for ax1, ax2 in zip(all_axes[1:], all_axes[0:-1]): - ax1.sharex(ax2) - all_axes[0].set_xlim(lim) - else: - # get_shared_y_axes() is immutable from matplotlib>=3.6.0. Must use Axis.sharey() - # instead, but this can only be called once per axis. Here we call sharey on all axes in - # a chain, which should have the same effect. - if len(all_axes) > 1: - for ax1, ax2 in zip(all_axes[1:], all_axes[0:-1]): - ax1.sharey(ax2) - all_axes[0].set_ylim(lim) + # Consider axis sharing among subplots + if share_axis: + if i == 0: + # Limit is set to the first axis only. + mpl_setlimit(limit) + else: + # get_shared_*_axes() is immutable from matplotlib>=3.6.0. + # Must use Axis.share*() instead, but this can only be called once per axis. + # Here we call share* on all axes in a chain, which should have the same effect. 
+ mpl_share(all_axes[i - 1]) + else: + mpl_setlimit(limit) + # Add title if self.figure_options.figure_title is not None: self._axis.set_title( diff --git a/qiskit_experiments/visualization/plotters/base_plotter.py b/qiskit_experiments/visualization/plotters/base_plotter.py index c0b86eeb3d..2810184b8d 100644 --- a/qiskit_experiments/visualization/plotters/base_plotter.py +++ b/qiskit_experiments/visualization/plotters/base_plotter.py @@ -421,43 +421,49 @@ def _default_figure_options(cls) -> Options: """Return default figure options. Figure Options: - xlabel (Union[str, List[str]]): X-axis label string of the output figure. - If there are multiple columns in the canvas, this could be a list of - labels. - ylabel (Union[str, List[str]]): Y-axis label string of the output figure. - If there are multiple rows in the canvas, this could be a list of - labels. - xlim (Tuple[float, float]): Min and max value of the horizontal axis. - If not provided, it is automatically scaled based on the input data - points. - ylim (Tuple[float, float]): Min and max value of the vertical axis. - If not provided, it is automatically scaled based on the input data - points. - xval_unit (str): Unit of x values. No scaling prefix is needed here as this - is controlled by ``xval_unit_scale``. - yval_unit (str): Unit of y values. See ``xval_unit`` for details. - xval_unit_scale (bool): Whether to add an SI unit prefix to ``xval_unit`` if - needed. For example, when the x values represent time and + xlabel (Union[str, List[str]]): X-axis label string of the output figure. If + there are multiple columns in the canvas, this could be a list of labels. + ylabel (Union[str, List[str]]): Y-axis label string of the output figure. If + there are multiple rows in the canvas, this could be a list of labels. + xlim (Union[Tuple[float, float], List[Tuple[float, float]]]): Min and max value + of the horizontal axis. If not provided, it is automatically scaled based + on the input data points. If there are multiple columns in the canvas, + this could be a list of xlims. + ylim (Union[Tuple[float, float], List[Tuple[float, float]]]): Min and max value + of the vertical axis. If not provided, it is automatically scaled based + on the input data points. If there are multiple rows in the canvas, + this could be a list of ylims. + xval_unit (Union[str, List[str]]): Unit of x values. + No scaling prefix is needed here as this is controlled by ``xval_unit_scale``. + If there are multiple columns in the canvas, this could be a list of xval_units. + yval_unit (Union[str, List[str]]): Unit of y values. + No scaling prefix is needed here as this is controlled by ``yval_unit_scale``. + If there are multiple rows in the canvas, this could be a list of yval_units. + xval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to + ``xval_unit`` if needed. For example, when the x values represent time and ``xval_unit="s"``, ``xval_unit_scale=True`` adds an SI unit prefix to ``"s"`` based on X values of plotted data. In the output figure, the prefix is automatically selected based on the maximum value in this axis. If your x values are in [1e-3, 1e-4], they are displayed as [1 ms, 0.1 ms]. By default, this option is set to ``True``. If ``False`` is provided, the axis numbers will be displayed in the scientific notation. - yval_unit_scale (bool): Whether to add an SI unit prefix to ``yval_unit`` if - needed. See ``xval_unit_scale`` for details. - xscale (str): A parameter to the function ``set_xscale()`` to set the - x-axis scaling.
Available options are ``log``, ``linear``, ``symlog``, - ``logit``, and ``quadratic``. - yscale (str): See ``xscale`` for details. + If there are multiple columns in the canvas, this could be a list of xval_unit_scale. + yval_unit_scale (Union[bool, List[bool]]): Whether to add an SI unit prefix to + ``yval_unit`` if needed. See ``xval_unit_scale`` for details. + If there are multiple rows in the canvas, this could be a list of yval_unit_scale. + xscale (str): The scaling of the x-axis, such as ``log`` or ``linear``. + yscale (str): The scaling of the y-axis, such as ``log`` or ``linear``. figure_title (str): Title of the figure. Defaults to None, i.e. nothing is shown. - series_params (Dict[SeriesName, Dict[str, Any]]): A dictionary of plot - parameters for each series. This is keyed on the name for each series. - Sub-dictionary is expected to have following three configurations, - "canvas", "color", and "symbol"; "canvas" is the integer index of axis - (when multi-canvas plot is set), "color" is the color of the curve, and - "symbol" is the marker Style of the curve for scatter plots. + sharex (bool): Set True to share x-axis ticks among sub-plots. + sharey (bool): Set True to share y-axis ticks among sub-plots. + series_params (Dict[str, Dict[str, Any]]): A dictionary of parameters for + each series. This is keyed on the name for each series. Sub-dictionary + is expected to have the following four configurations, "canvas", + "color", "symbol" and "label"; "canvas" is the integer index of axis + (when multi-canvas plot is set), "color" is the color of the drawn + graphics, "symbol" is the series marker style for scatter plots, and + "label" is a user provided series label that appears in the legend. """ options = Options( xlabel=None, @@ -470,12 +476,13 @@ yval_unit_scale=True, xscale=None, yscale=None, + sharex=True, + sharey=True, figure_title=None, series_params={}, ) - - options.set_validator("xscale", ["linear", "log", "symlog", "logit", "quadratic"]) - options.set_validator("yscale", ["linear", "log", "symlog", "logit", "quadratic"]) + options.set_validator("xscale", ["linear", "log", "symlog", "logit", "quadratic", None]) + options.set_validator("yscale", ["linear", "log", "symlog", "logit", "quadratic", None]) return options diff --git a/releasenotes/notes/0.3/cleanup-cr-hamiltonian-experiment-7f47c51d26941f16.yaml b/releasenotes/notes/0.3/cleanup-cr-hamiltonian-experiment-7f47c51d26941f16.yaml index 4828ae44c6..a05dcc704a 100644 --- a/releasenotes/notes/0.3/cleanup-cr-hamiltonian-experiment-7f47c51d26941f16.yaml +++ b/releasenotes/notes/0.3/cleanup-cr-hamiltonian-experiment-7f47c51d26941f16.yaml @@ -6,5 +6,5 @@ upgrade: setting backend for just checking experiment sequence. The sequence with actual parameters is generated after the backend is set. In addition, now experiments can take ``cr_gate`` in the constructor which is a ``Gate`` type subclass taking a single parameter (flat-top width). - If one inputs a :class:`~qiskit.extensions.hamiltonian_gate.HamiltonianGate` subclass with + If one inputs a :class:`~qiskit.circuit.library.HamiltonianGate` subclass with cross resonance Hamiltonian, experiment can be simulated with Aer QASM simulator.
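Tying together the unshared-axis options introduced in ``base_drawer.py`` and ``base_plotter.py`` above, a hypothetical usage sketch follows. It assumes the drawer exposes ``set_options``/``set_figure_options`` setters and a one-row, two-column ``subplots`` grid; the option names follow the docstrings above, but the concrete values are illustrative only:

.. code-block:: python

    from qiskit_experiments.visualization import MplDrawer

    drawer = MplDrawer()
    drawer.set_options(subplots=(1, 2))  # one row, two columns
    drawer.set_figure_options(
        sharex=False,                        # unlink the x configuration of the two columns
        xlim=[(0.0, 1.0e-6), (0.0, 5.0e9)],  # one (min, max) pair per column
        xval_unit=["s", "Hz"],               # one unit per column
        xlabel=["Delay", "Frequency"],       # labels may also be given per column
    )
    drawer.initialize_canvas()
    # ... draw series data here ...
    drawer.format_canvas()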
diff --git a/releasenotes/notes/access_figure_without_extension-5b7438c19e223d6b.yaml b/releasenotes/notes/access_figure_without_extension-5b7438c19e223d6b.yaml new file mode 100644 index 0000000000..bb7d34a29d --- /dev/null +++ b/releasenotes/notes/access_figure_without_extension-5b7438c19e223d6b.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Figures in `ExperimentData` objects can now be accessed without the '.svg' extension. \ No newline at end of file diff --git a/releasenotes/notes/add-dataframe-curve-data-a8905c450748b281.yaml b/releasenotes/notes/add-dataframe-curve-data-a8905c450748b281.yaml new file mode 100644 index 0000000000..7af3b7320f --- /dev/null +++ b/releasenotes/notes/add-dataframe-curve-data-a8905c450748b281.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + :class:`.ScatterTable` is introduced as a drop-in replacement of :class:`.CurveData`. + + This is a data format to store intermediate data in curve analysis, built on top of + the pandas DataFrame. Each table row corresponds to a single data point, + and the table contains all data points generated by the :class:`.CurveAnalysis`. + All properties and methods of :class:`.CurveData` are implemented for backward compatibility, + but these will be removed in a future release. +developer: + - | + The :meth:`.CurveAnalysis._create_figures` method is added to the curve analysis base class. + A curve analysis subclass can override this method to customize the output figures. + The method is called with the :class:`.ScatterTable` containing all intermediate data points + generated during the curve analysis. diff --git a/releasenotes/notes/add-support-for-visualization-with-unshared-axis-9f7bfe272353086b.yaml b/releasenotes/notes/add-support-for-visualization-with-unshared-axis-9f7bfe272353086b.yaml new file mode 100644 index 0000000000..8c801288ee --- /dev/null +++ b/releasenotes/notes/add-support-for-visualization-with-unshared-axis-9f7bfe272353086b.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + The :class:`.MplDrawer` visualization backend has been upgraded so that + it can take a list of options for ``xlim``, ``ylim``, ``xval_unit``, ``yval_unit``, + ``xval_unit_scale``, and ``yval_unit_scale``. New figure options + ``sharex`` and ``sharey`` are also added. The new options are used to unlink the + configuration of sub axes, and default to ``True`` for backward compatibility. + By disabling these options, an experiment author can write an analysis class that + generates a multi-axes figure with different plot ranges. diff --git a/releasenotes/notes/add_warning_analysis_without_data-bfc802da52591f13.yaml b/releasenotes/notes/add_warning_analysis_without_data-bfc802da52591f13.yaml new file mode 100644 index 0000000000..986eb7a39e --- /dev/null +++ b/releasenotes/notes/add_warning_analysis_without_data-bfc802da52591f13.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + Display a warning when running an analysis on `ExperimentData` objects which do not contain data. \ No newline at end of file diff --git a/releasenotes/notes/broadcasting-option-8a3b72bfc1df9668.yaml b/releasenotes/notes/broadcasting-option-8a3b72bfc1df9668.yaml new file mode 100644 index 0000000000..e99e7a1b8e --- /dev/null +++ b/releasenotes/notes/broadcasting-option-8a3b72bfc1df9668.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Added a `broadcast` option to :class:`.CompositeAnalysis`. When `broadcast=True` is passed, + this option will be applied to child experiment analyses within the class.
This means it will iterate + through the child analysis classes and apply the given option to each of + them. diff --git a/releasenotes/notes/bugfix_expdata_copy-2c73a21ad720858d.yaml b/releasenotes/notes/bugfix_expdata_copy-2c73a21ad720858d.yaml new file mode 100644 index 0000000000..cec8f54e99 --- /dev/null +++ b/releasenotes/notes/bugfix_expdata_copy-2c73a21ad720858d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + The ``copy`` method of the ``ExperimentData`` class now copies the provider. + \ No newline at end of file diff --git a/releasenotes/notes/circuit-count-method-a095bd74aaa1d2fb.yaml b/releasenotes/notes/circuit-count-method-a095bd74aaa1d2fb.yaml new file mode 100644 index 0000000000..d8080ae02b --- /dev/null +++ b/releasenotes/notes/circuit-count-method-a095bd74aaa1d2fb.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + A new method :meth:`BaseExperiment.job_info` has been added that will + output the number of jobs the experiment is expected to be split into + based on the provided backend. + - | + Refer to issue + `#1247 <https://github.com/Qiskit-Extensions/qiskit-experiments/issues/1247>`__ + for more details. + \ No newline at end of file diff --git a/releasenotes/notes/fix-curve-fit-weights-fb43d3aa5ed1c91c.yaml b/releasenotes/notes/fix-curve-fit-weights-fb43d3aa5ed1c91c.yaml new file mode 100644 index 0000000000..98b1172c52 --- /dev/null +++ b/releasenotes/notes/fix-curve-fit-weights-fb43d3aa5ed1c91c.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixed the calculation of weights for curve fitting. Previously, the weights of data points used + to obtain the residuals of the fit curve were computed as the inverse of the error bars of the y data. + This may yield significant weights on certain data points when their error bars are small or zero, + which can cause a local overfit to these data points. + To avoid this edge case of small error bars, computed weights are now clipped at the 90th percentile. + This update might slightly change the outcome of fits. diff --git a/releasenotes/notes/half-angle-x-600debac368ce2c6.yaml b/releasenotes/notes/half-angle-x-600debac368ce2c6.yaml new file mode 100644 index 0000000000..d1af5944f2 --- /dev/null +++ b/releasenotes/notes/half-angle-x-600debac368ce2c6.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + The :class:`.HalfAngle` experiment's circuits were changed so that they use + combinations of ``rz`` and ``x`` instead of the less standard ``y`` gate. + This change allows :class:`~HalfAngle` to be run on IBM backends directly. + Previously, it could only be run through the :class:`~HalfAngleCal` + subclass in combination with a :class:`~Calibrations` instance containing a + custom calibration for the ``y`` gate. + Fixes issue `#1233 `_. diff --git a/releasenotes/notes/qiskit-dependency-3f6b8d71cc4d2c31.yaml b/releasenotes/notes/qiskit-dependency-3f6b8d71cc4d2c31.yaml new file mode 100644 index 0000000000..f2b29811b4 --- /dev/null +++ b/releasenotes/notes/qiskit-dependency-3f6b8d71cc4d2c31.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + The dependency on ``qiskit-terra`` was replaced with a dependency on + ``qiskit``. This change follows the move in upstream Qiskit to rename + ``qiskit-terra`` to ``qiskit``. The minimum required version was increased + from 0.24 for ``qiskit-terra`` to 0.44 for ``qiskit`` (equivalent to + ``qiskit-terra`` 0.25). The maximum ``qiskit`` version was set to ``<1.0`` + in anticipation of breaking changes in Qiskit 1.0. Closer to the Qiskit 1.0 + release, a patch release of ``qiskit-experiments`` will remove this + constraint and address any compatibility issues.
For more information see + the `Qiskit repository renaming plan + `__ + and the `Qiskit issue `__ + for the renaming of the package. diff --git a/releasenotes/notes/rabi-and-qv-bugfix-34636baee6651af1.yaml b/releasenotes/notes/rabi-and-qv-bugfix-34636baee6651af1.yaml index 8c06aff21d..b6119d299c 100644 --- a/releasenotes/notes/rabi-and-qv-bugfix-34636baee6651af1.yaml +++ b/releasenotes/notes/rabi-and-qv-bugfix-34636baee6651af1.yaml @@ -5,4 +5,6 @@ fixes: `qiskit-ibm-provider` using custom amplitudes provided as a numpy array. - | Resolved an issue that caused QV experiments to fail when executed via `qiskit-ibm-provider` using - Qiskit Terra for calculating ideal probabilities, instead of Aer. \ No newline at end of file + Qiskit Terra for calculating ideal probabilities, instead of Aer. + - | + Resolved a serialization issue that affected DRAG experiments with integral beta values specified. \ No newline at end of file diff --git a/releasenotes/notes/rb-v2-none-coupling-fda2b22afdef507b.yaml b/releasenotes/notes/rb-v2-none-coupling-fda2b22afdef507b.yaml new file mode 100644 index 0000000000..4df8316ac8 --- /dev/null +++ b/releasenotes/notes/rb-v2-none-coupling-fda2b22afdef507b.yaml @@ -0,0 +1,12 @@ +--- +fixes: + - | + Changed :class:`.StandardRB` to treat two qubit operations in the + :class:`qiskit.transpiler.Target` as having all-to-all connectivity if + there is no set of specific pairs of coupled qubits. Most importantly, this + change allows :class:`.StandardRB` to work with + :class:`qiskit_aer.AerSimulator` for multi-qubit benchmarking after + ``qiskit-aer`` 0.13.0. Version 0.13.0 of ``qiskit-aer`` changed + the default :class:`qiskit_aer.AerSimulator` to have such a + :class:`qiskit.transpiler.Target` without specific coupled pairs. + See `#1292 `__. diff --git a/releasenotes/notes/remove-tomo-reset-3f21ec4d0dacba91.yaml b/releasenotes/notes/remove-tomo-reset-3f21ec4d0dacba91.yaml new file mode 100644 index 0000000000..702da2cc1b --- /dev/null +++ b/releasenotes/notes/remove-tomo-reset-3f21ec4d0dacba91.yaml @@ -0,0 +1,8 @@ +--- +other: + - | + Removed the reset instruction from the beginning of tomography experiments. + Since qubits are usually reset between circuits, this change should have no + impact on tomography experiments, but it should allow backends that do not + provide a reset instruction to run tomography experiments. `#1250 + ` diff --git a/releasenotes/notes/selective-figure-generation-0864216f34d3486f.yaml b/releasenotes/notes/selective-figure-generation-0864216f34d3486f.yaml new file mode 100644 index 0000000000..14ac846d53 --- /dev/null +++ b/releasenotes/notes/selective-figure-generation-0864216f34d3486f.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The ``generate_figures`` parameter has been added to :class:`.CompositeAnalysis` to control figure + generation. By default, ``generate_figures`` is ``always``, meaning figures will always be generated. + If ``generate_figures`` is set to ``selective``, then only figures for analysis results of bad + quality will be generated. If ``generate_figures`` is set to ``never``, then figures will never be + generated. This behavior can be overridden for individual analyses by setting the analysis option + ``plot`` for :class:`.CurveAnalysis`. 
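The ``generate_figures`` behavior described in this note is exercised by the test changes later in this diff; a minimal usage sketch follows, where the ``analysis1``/``analysis2`` child analyses and ``exp_data`` are placeholders defined elsewhere:

.. code-block:: python

    from qiskit_experiments.framework import CompositeAnalysis

    composite = CompositeAnalysis(
        [analysis1, analysis2],        # child analyses, e.g. CurveAnalysis instances
        flatten_results=True,
        generate_figures="selective",  # "always" (default), "selective", or "never"
    )
    # With "selective", figures are produced only for fits of bad quality.
    exp_data = composite.run(exp_data)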
diff --git a/releasenotes/notes/setter-methods-for-experiment-099074e59faffb49.yaml b/releasenotes/notes/setter-methods-for-experiment-099074e59faffb49.yaml new file mode 100644 index 0000000000..00d66c572c --- /dev/null +++ b/releasenotes/notes/setter-methods-for-experiment-099074e59faffb49.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added ``experiment_type`` as optional ``__init__`` kwarg in :class:`.BatchExperiment` + and :class:`.ParallelExperiment`. + - | + ``experiment_type`` can now be easily set and retrieved from the experiment + object post-construction using the ``experiment_type`` property and setter. diff --git a/requirements-dev.txt b/requirements-dev.txt index 98abe635b4..24c7719517 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,10 @@ +qiskit>=0.45.0 black~=22.0 +fixtures stestr -pylint~=2.16.2 -astroid~=2.14.2 # Must be kept aligned to what pylint wants +testtools +pylint~=3.0.2 +astroid~=3.0.1 # Must be kept aligned to what pylint wants jinja2==3.0.3 sphinx>=6.2.1,<=7 jupyter-sphinx>=0.4.0 @@ -12,7 +15,6 @@ reno>=4.0.0 nbsphinx arxiv ddt>=1.6.0 -pandas>=1.1.5 pylatexenc multimethod sphinx-copybutton @@ -21,4 +23,4 @@ coverage>=5.5 ipykernel<=6.21.3 jupyter-client<=8.0.3 ipython<8.13.0 ; python_version<"3.9" # for python 3.8 compatibility -sphinx-remove-toctrees \ No newline at end of file +sphinx-remove-toctrees diff --git a/requirements-extras.txt b/requirements-extras.txt index 59466ff8cd..a1fc84cc89 100644 --- a/requirements-extras.txt +++ b/requirements-extras.txt @@ -1,5 +1,5 @@ qiskit-ibm-provider>=0.6.1 # for submitting experiments to backends through the IBM provider cvxpy>=1.3.2 # for tomography scikit-learn # for discriminators -qiskit-aer>=0.11.0 # for QV simulations +qiskit-aer>=0.11.0 qiskit_dynamics>=0.4.0 # for the PulseBackend diff --git a/requirements.txt b/requirements.txt index 3fa61a3e18..20f645b1a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,9 @@ numpy>=1.17 scipy>=1.4 -qiskit-terra>=0.24 +qiskit>=0.44 qiskit-ibm-experiment>=0.3.4 matplotlib>=3.4 uncertainties lmfit -rustworkx \ No newline at end of file +rustworkx +pandas>=1.1.5 diff --git a/test/base.py b/test/base.py index b0a4aa15d0..028b549b7b 100644 --- a/test/base.py +++ b/test/base.py @@ -13,13 +13,16 @@ Qiskit Experiments test case class """ +import os import json import pickle +import unittest import warnings from typing import Any, Callable, Optional +import fixtures +import testtools import uncertainties -from qiskit.test import QiskitTestCase from qiskit.utils.deprecation import deprecate_func from qiskit_experiments.framework import ( @@ -30,189 +33,256 @@ from qiskit_experiments.framework.experiment_data import ExperimentStatus from .extended_equality import is_equivalent +# Fail tests that take longer than this +TEST_TIMEOUT = int(os.environ.get("TEST_TIMEOUT", 60)) +# Use testtools by default as a (mostly) drop in replacement for +# unittest's TestCase. This will enable the fixtures used for capturing stdout +# stderr, and pylogging to attach the output to stestr's result stream. +USE_TESTTOOLS = os.environ.get("QE_USE_TESTTOOLS", "TRUE").lower() not in ("false", "0", "no") -class QiskitExperimentsTestCase(QiskitTestCase): - """Qiskit Experiments specific extra functionality for test cases.""" - - @classmethod - def setUpClass(cls): - """Set-up test class.""" - super().setUpClass() - - # Some functionality may be deprecated in Qiskit Experiments. 
If the deprecation warnings aren't - # filtered, the tests will fail as ``QiskitTestCase`` sets all warnings to be treated as an error - # by default. - # pylint: disable=invalid-name - allow_deprecationwarning_message = [ - # TODO: Remove in 0.6, when submodule `.curve_analysis.visualization` is removed. - r".*Plotting and drawing functionality has been moved", - r".*Legacy drawers from `.curve_analysis.visualization are deprecated", - ] - for msg in allow_deprecationwarning_message: - warnings.filterwarnings("default", category=DeprecationWarning, message=msg) - - def assertExperimentDone( - self, - experiment_data: ExperimentData, - timeout: float = 120, - ): - """Blocking execution of next line until all threads are completed then - checks if status returns Done. - - Args: - experiment_data: Experiment data to evaluate. - timeout: The maximum time in seconds to wait for executor to complete. - """ - experiment_data.block_for_results(timeout=timeout) - - self.assertEqual( - experiment_data.status(), - ExperimentStatus.DONE, - msg="All threads are executed but status is not DONE. " + experiment_data.errors(), + +def create_base_test_case(use_testtools: bool) -> unittest.TestCase: + """Create the base test case class for package tests + + This function produces the base class for qiskit-experiments tests using + either ``unittest.TestCase`` or ``testtools.TestCase`` for the base class. + The creation of the class is done in this function rather than directly + executed in the module so that, even when ``USE_TESTTOOLS`` is true, a + ``unittest`` base class can be produced for ``test_base.py`` to check that + no hard-dependence on ``testtools`` has been introduced. + """ + if use_testtools: + + class BaseTestCase(testtools.TestCase): + """Base test class.""" + + # testtools maintains their own version of assert functions which mostly + # behave as value adds to the std unittest assertion methods. However, + # for assertEquals and assertRaises modern unittest has diverged from + # the forks in testtools and offer more (or different) options that are + # incompatible testtools versions. Just use the stdlib versions so that + # our tests work as expected. + assertRaises = unittest.TestCase.assertRaises + assertEqual = unittest.TestCase.assertEqual + + def setUp(self): + super().setUp() + if os.environ.get("QISKIT_TEST_CAPTURE_STREAMS"): + stdout = self.useFixture(fixtures.StringStream("stdout")).stream + self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout)) + stderr = self.useFixture(fixtures.StringStream("stderr")).stream + self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr)) + self.useFixture(fixtures.LoggerFixture(nuke_handlers=False, level=None)) + + else: + + class BaseTestCase(unittest.TestCase): + """Base test class.""" + + def useFixture(self, fixture): # pylint: disable=invalid-name + """Shim so that useFixture can be called in subclasses + + useFixture is a testtools.TestCase method. The actual fixture is + not used when using unittest. + """ + + class QETestCase(BaseTestCase): + """Qiskit Experiments specific extra functionality for test cases.""" + + def setUp(self): + super().setUp() + self.useFixture(fixtures.Timeout(TEST_TIMEOUT, gentle=True)) + + @classmethod + def setUpClass(cls): + """Set-up test class.""" + super().setUpClass() + + warnings.filterwarnings("error", category=DeprecationWarning) + + # Some functionality may be deprecated in Qiskit Experiments. 
If + # the deprecation warnings aren't filtered, the tests will fail as + # ``QiskitTestCase`` sets all warnings to be treated as an error by + # default. + # pylint: disable=invalid-name + allow_deprecationwarning_message = [ + # TODO: Remove in 0.6, when submodule `.curve_analysis.visualization` is removed. + r".*Plotting and drawing functionality has been moved", + r".*Legacy drawers from `.curve_analysis.visualization are deprecated", + ] + for msg in allow_deprecationwarning_message: + warnings.filterwarnings("default", category=DeprecationWarning, message=msg) + + def assertExperimentDone( + self, + experiment_data: ExperimentData, + timeout: Optional[float] = None, + ): + """Blocking execution of next line until all threads are completed then + checks if status returns Done. + + Args: + experiment_data: Experiment data to evaluate. + timeout: The maximum time in seconds to wait for executor to + complete. Defaults to the value of ``TEST_TIMEOUT``. + """ + if timeout is None: + timeout = TEST_TIMEOUT + experiment_data.block_for_results(timeout=timeout) + + self.assertEqual( + experiment_data.status(), + ExperimentStatus.DONE, + msg="All threads are executed but status is not DONE. " + experiment_data.errors(), + ) + + def assertEqualExtended( + self, + first: Any, + second: Any, + *, + msg: Optional[str] = None, + strict_type: bool = False, + ): + """Extended equality assertion which covers Qiskit Experiments classes. + + .. note:: + Some Qiskit Experiments class may intentionally avoid implementing + the equality dunder method, or may be used in some unusual situations. + These are mainly caused by to JSON round trip situation, and some custom classes + doesn't guarantee object equality after round trip. + This assertion function forcibly compares input two objects with + the custom equality checker, which is implemented for unittest purpose. + + Args: + first: First object to compare. + second: Second object to compare. + msg: Optional. Custom error message issued when first and second object are not equal. + strict_type: Set True to enforce type check before comparison. + """ + default_msg = f"{first} != {second}" + + self.assertTrue( + is_equivalent(first, second, strict_type=strict_type), + msg=msg or default_msg, + ) + + def assertRoundTripSerializable( + self, + obj: Any, + *, + check_func: Optional[Callable] = None, + strict_type: bool = False, + ): + """Assert that an object is round trip serializable. + + Args: + obj: the object to be serialized. + check_func: Optional, a custom function ``check_func(a, b) -> bool`` + to check equality of the original object with the decoded + object. If None :meth:`.assertEqualExtended` is called. + strict_type: Set True to enforce type check before comparison. + """ + try: + encoded = json.dumps(obj, cls=ExperimentEncoder) + except TypeError: + self.fail("JSON serialization raised unexpectedly.") + try: + decoded = json.loads(encoded, cls=ExperimentDecoder) + except TypeError: + self.fail("JSON deserialization raised unexpectedly.") + + if check_func is not None: + self.assertTrue(check_func(obj, decoded), msg=f"{obj} != {decoded}") + else: + self.assertEqualExtended(obj, decoded, strict_type=strict_type) + + def assertRoundTripPickle( + self, + obj: Any, + *, + check_func: Optional[Callable] = None, + strict_type: bool = False, + ): + """Assert that an object is round trip serializable using pickle module. + + Args: + obj: the object to be serialized. 
+ check_func: Optional, a custom function ``check_func(a, b) -> bool`` + to check equality of the original object with the decoded + object. If None :meth:`.assertEqualExtended` is called. + strict_type: Set True to enforce type check before comparison. + """ + try: + encoded = pickle.dumps(obj) + except TypeError: + self.fail("pickle raised unexpectedly.") + try: + decoded = pickle.loads(encoded) + except TypeError: + self.fail("pickle deserialization raised unexpectedly.") + + if check_func is not None: + self.assertTrue(check_func(obj, decoded), msg=f"{obj} != {decoded}") + else: + self.assertEqualExtended(obj, decoded, strict_type=strict_type) + + @classmethod + @deprecate_func( + since="0.6", + additional_msg="Use test.extended_equality.is_equivalent instead.", + pending=True, + package_name="qiskit-experiments", + ) + def json_equiv(cls, data1, data2) -> bool: + """Check if two experiments are equivalent by comparing their configs""" + return is_equivalent(data1, data2) + + @staticmethod + @deprecate_func( + since="0.6", + additional_msg="Use test.extended_equality.is_equivalent instead.", + pending=True, + package_name="qiskit-experiments", + ) + def ufloat_equiv(data1: uncertainties.UFloat, data2: uncertainties.UFloat) -> bool: + """Check if two values with uncertainties are equal. No correlation is considered.""" + return is_equivalent(data1, data2) + + @classmethod + @deprecate_func( + since="0.6", + additional_msg="Use test.extended_equality.is_equivalent instead.", + pending=True, + package_name="qiskit-experiments", ) + def analysis_result_equiv(cls, result1, result2): + """Test two analysis results are equivalent""" + return is_equivalent(result1, result2) - def assertEqualExtended( - self, - first: Any, - second: Any, - *, - msg: Optional[str] = None, - strict_type: bool = False, - ): - """Extended equality assertion which covers Qiskit Experiments classes. - - .. note:: - Some Qiskit Experiments class may intentionally avoid implementing - the equality dunder method, or may be used in some unusual situations. - These are mainly caused by to JSON round trip situation, and some custom classes - doesn't guarantee object equality after round trip. - This assertion function forcibly compares input two objects with - the custom equality checker, which is implemented for unittest purpose. - - Args: - first: First object to compare. - second: Second object to compare. - msg: Optional. Custom error message issued when first and second object are not equal. - strict_type: Set True to enforce type check before comparison. 
- """ - default_msg = f"{first} != {second}" - - self.assertTrue( - is_equivalent(first, second, strict_type=strict_type), - msg=msg or default_msg, + @classmethod + @deprecate_func( + since="0.6", + additional_msg="Use test.extended_equality.is_equivalent instead.", + pending=True, + package_name="qiskit-experiments", ) + def curve_fit_data_equiv(cls, data1, data2): + """Test two curve fit result are equivalent.""" + return is_equivalent(data1, data2) + + @classmethod + @deprecate_func( + since="0.6", + additional_msg="Use test.extended_equality.is_equivalent instead.", + pending=True, + package_name="qiskit-experiments", + ) + def experiment_data_equiv(cls, data1, data2): + """Check two experiment data containers are equivalent""" + return is_equivalent(data1, data2) + + return QETestCase + - def assertRoundTripSerializable( - self, - obj: Any, - *, - check_func: Optional[Callable] = None, - strict_type: bool = False, - ): - """Assert that an object is round trip serializable. - - Args: - obj: the object to be serialized. - check_func: Optional, a custom function ``check_func(a, b) -> bool`` - to check equality of the original object with the decoded - object. If None :meth:`.assertEqualExtended` is called. - strict_type: Set True to enforce type check before comparison. - """ - try: - encoded = json.dumps(obj, cls=ExperimentEncoder) - except TypeError: - self.fail("JSON serialization raised unexpectedly.") - try: - decoded = json.loads(encoded, cls=ExperimentDecoder) - except TypeError: - self.fail("JSON deserialization raised unexpectedly.") - - if check_func is not None: - self.assertTrue(check_func(obj, decoded), msg=f"{obj} != {decoded}") - else: - self.assertEqualExtended(obj, decoded, strict_type=strict_type) - - def assertRoundTripPickle( - self, - obj: Any, - *, - check_func: Optional[Callable] = None, - strict_type: bool = False, - ): - """Assert that an object is round trip serializable using pickle module. - - Args: - obj: the object to be serialized. - check_func: Optional, a custom function ``check_func(a, b) -> bool`` - to check equality of the original object with the decoded - object. If None :meth:`.assertEqualExtended` is called. - strict_type: Set True to enforce type check before comparison. - """ - try: - encoded = pickle.dumps(obj) - except TypeError: - self.fail("pickle raised unexpectedly.") - try: - decoded = pickle.loads(encoded) - except TypeError: - self.fail("pickle deserialization raised unexpectedly.") - - if check_func is not None: - self.assertTrue(check_func(obj, decoded), msg=f"{obj} != {decoded}") - else: - self.assertEqualExtended(obj, decoded, strict_type=strict_type) - - @classmethod - @deprecate_func( - since="0.6", - additional_msg="Use test.extended_equality.is_equivalent instead.", - pending=True, - package_name="qiskit-experiments", - ) - def json_equiv(cls, data1, data2) -> bool: - """Check if two experiments are equivalent by comparing their configs""" - return is_equivalent(data1, data2) - - @staticmethod - @deprecate_func( - since="0.6", - additional_msg="Use test.extended_equality.is_equivalent instead.", - pending=True, - package_name="qiskit-experiments", - ) - def ufloat_equiv(data1: uncertainties.UFloat, data2: uncertainties.UFloat) -> bool: - """Check if two values with uncertainties are equal. 
No correlation is considered.""" - return is_equivalent(data1, data2) - - @classmethod - @deprecate_func( - since="0.6", - additional_msg="Use test.extended_equality.is_equivalent instead.", - pending=True, - package_name="qiskit-experiments", - ) - def analysis_result_equiv(cls, result1, result2): - """Test two analysis results are equivalent""" - return is_equivalent(result1, result2) - - @classmethod - @deprecate_func( - since="0.6", - additional_msg="Use test.extended_equality.is_equivalent instead.", - pending=True, - package_name="qiskit-experiments", - ) - def curve_fit_data_equiv(cls, data1, data2): - """Test two curve fit result are equivalent.""" - return is_equivalent(data1, data2) - - @classmethod - @deprecate_func( - since="0.6", - additional_msg="Use test.extended_equality.is_equivalent instead.", - pending=True, - package_name="qiskit-experiments", - ) - def experiment_data_equiv(cls, data1, data2): - """Check two experiment data containers are equivalent""" - return is_equivalent(data1, data2) +QiskitExperimentsTestCase = create_base_test_case(USE_TESTTOOLS) diff --git a/test/curve_analysis/test_baseclass.py b/test/curve_analysis/test_baseclass.py index 4f49745c9d..7025dbd60f 100644 --- a/test/curve_analysis/test_baseclass.py +++ b/test/curve_analysis/test_baseclass.py @@ -17,6 +17,7 @@ from test.fake_experiment import FakeExperiment import numpy as np +from ddt import data, ddt, unpack from lmfit.models import ExpressionModel from qiskit.qobj.utils import MeasLevel @@ -79,6 +80,7 @@ def parallel_sampler(x, y1, y2, shots=10000, seed=123, **metadata): return expdata +@ddt class TestCurveAnalysis(CurveAnalysisTestCase): """A collection of CurveAnalysis unit tests and integration tests.""" @@ -133,10 +135,7 @@ def test_data_extraction(self): }, ) - curve_data = analysis._run_data_processing( - raw_data=expdata1.data() + expdata2.data(), - models=analysis._models, - ) + curve_data = analysis._run_data_processing(raw_data=expdata1.data() + expdata2.data()) self.assertListEqual(curve_data.labels, ["s1", "s2"]) # check data of series1 @@ -229,6 +228,7 @@ def test_end_to_end_single_function(self): self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, 0.5, delta=0.1) self.assertAlmostEqual(result.analysis_results("tau").value.nominal_value, 0.3, delta=0.1) + self.assertEqual(len(result._figures), 0) def test_end_to_end_multi_objective(self): """Integration test for multi objective function.""" @@ -398,15 +398,18 @@ def _initialize(self, experiment_data): self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, 0.5, delta=0.1) self.assertAlmostEqual(result.analysis_results("tau").value.nominal_value, 0.3, delta=0.1) - def test_end_to_end_parallel_analysis(self): - """Integration test for running two curve analyses in parallel.""" + @data((False, "always", 0), (True, "never", 2), (None, "always", 2), (None, "never", 0)) + @unpack + def test_end_to_end_parallel_analysis(self, plot_flag, figure_flag, n_figures): + """Integration test for running two curve analyses in parallel, including + selective figure generation.""" analysis1 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)", name="test")]) analysis1.set_options( data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]), p0={"amp": 0.5, "tau": 0.3}, result_parameters=["amp", "tau"], - plot=False, + plot=plot_flag, ) analysis2 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)", name="test")]) @@ -414,10 +417,12 @@ def 
test_end_to_end_parallel_analysis(self):
             data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]),
             p0={"amp": 0.7, "tau": 0.5},
             result_parameters=["amp", "tau"],
-            plot=False,
+            plot=plot_flag,
         )
 
-        composite = CompositeAnalysis([analysis1, analysis2], flatten_results=True)
+        composite = CompositeAnalysis(
+            [analysis1, analysis2], flatten_results=True, generate_figures=figure_flag
+        )
         amp1 = 0.5
         tau1 = 0.3
         amp2 = 0.7
@@ -440,6 +445,90 @@ def test_end_to_end_parallel_analysis(self):
         self.assertAlmostEqual(taus[0].value.nominal_value, tau1, delta=0.1)
         self.assertAlmostEqual(taus[1].value.nominal_value, tau2, delta=0.1)
 
+        self.assertEqual(len(result._figures), n_figures)
+
+    def test_selective_figure_generation(self):
+        """Test that selective figure generation based on quality works as expected."""
+
+        # analysis with intentionally bad fit
+        analysis1 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x)", name="test")])
+        analysis1.set_options(
+            data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]),
+            p0={"amp": 0.7},
+            result_parameters=["amp"],
+        )
+        analysis2 = CurveAnalysis(models=[ExpressionModel(expr="amp * exp(-x/tau)", name="test")])
+        analysis2.set_options(
+            data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]),
+            p0={"amp": 0.7, "tau": 0.5},
+            result_parameters=["amp", "tau"],
+        )
+        composite = CompositeAnalysis(
+            [analysis1, analysis2], flatten_results=False, generate_figures="selective"
+        )
+        amp1 = 0.7
+        tau1 = 0.5
+        amp2 = 0.7
+        tau2 = 0.5
+
+        x = np.linspace(0, 1, 100)
+        y1 = amp1 * np.exp(-x / tau1)
+        y2 = amp2 * np.exp(-x / tau2)
+
+        test_data = self.parallel_sampler(x, y1, y2)
+        result = composite.run(test_data)
+        self.assertExperimentDone(result)
+
+        for res in result.child_data():
+            # only generate a figure if the quality is bad
+            if res.analysis_results(0).quality == "bad":
+                self.assertEqual(len(res._figures), 1)
+            else:
+                self.assertEqual(len(res._figures), 0)
+
+    def test_end_to_end_zero_yerr(self):
+        """Integration test for an edge case of having zero y error.
+
+        When the error bar is zero, the fit weights used to compute the residuals tend to
+        become large. When a weight is too significant, the fit locally overfits to
+        certain data points with small or zero y error.
+        """
+        analysis = CurveAnalysis(models=[ExpressionModel(expr="amp * x**2", name="test")])
+        analysis.set_options(
+            data_processor=DataProcessor(input_key="counts", data_actions=[Probability("1")]),
+            result_parameters=["amp"],
+            average_method="sample",  # Use sample average to make some yerr = 0
+            plot=False,
+            p0={"amp": 0.2},
+        )
+
+        amp = 0.3
+        x = np.linspace(0, 1, 100)
+        y = amp * x**2
+
+        # Replace small y values with zero.
+        # Since the mock function samples the count dictionary from a binomial
+        # distribution, y=0 (or 1) always yields the same count dictionary,
+        # and hence the y error becomes zero with sample averaging.
+        # In this case, amp = 0 may yield the best result.
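+        # (Illustrative aside, assuming a standard weighted least-squares fitter:
+        # the residual is typically computed as (model - data) / y_err, so the
+        # effective weight 1 / y_err diverges as y_err -> 0, which is why the
+        # zero-error points below can dominate the fit.)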
+ y[0] = 0 + y[1] = 0 + y[2] = 0 + + test_data1 = self.single_sampler(x, y, seed=123) + test_data2 = self.single_sampler(x, y, seed=124) + test_data3 = self.single_sampler(x, y, seed=125) + + expdata = ExperimentData(experiment=FakeExperiment()) + expdata.add_data(test_data1.data()) + expdata.add_data(test_data2.data()) + expdata.add_data(test_data3.data()) + + result = analysis.run(expdata) + self.assertExperimentDone(result) + + self.assertAlmostEqual(result.analysis_results("amp").value.nominal_value, amp, delta=0.1) + def test_get_init_params(self): """Integration test for getting initial parameter from overview entry.""" @@ -467,7 +556,9 @@ def test_get_init_params(self): y_reproduced = analysis.models[0].eval(x=x, **overview.init_params) np.testing.assert_array_almost_equal(y_ref, y_reproduced) - def test_multi_composite_curve_analysis(self): + @data((False, "never", 0), (True, "never", 1), (None, "never", 0), (None, "always", 1)) + @unpack + def test_multi_composite_curve_analysis(self, plot, gen_figures, n_figures): """Integration test for composite curve analysis. This analysis consists of two curve fittings for cos and sin series. @@ -505,7 +596,8 @@ def test_multi_composite_curve_analysis(self): group_analysis = CompositeCurveAnalysis(analyses) group_analysis.analyses("group_A").set_options(p0={"amp": 0.3, "freq": 2.1, "b": 0.5}) group_analysis.analyses("group_B").set_options(p0={"amp": 0.5, "freq": 3.2, "b": 0.5}) - group_analysis.set_options(plot=False) + group_analysis.set_options(plot=plot) + group_analysis._generate_figures = gen_figures amp1 = 0.2 amp2 = 0.4 @@ -543,6 +635,7 @@ def test_multi_composite_curve_analysis(self): self.assertEqual(amps[1].extra["group"], "group_B") self.assertAlmostEqual(amps[0].value.n, 0.2, delta=0.1) self.assertAlmostEqual(amps[1].value.n, 0.4, delta=0.1) + self.assertEqual(len(result._figures), n_figures) class TestFitOptions(QiskitExperimentsTestCase): diff --git a/test/database_service/test_db_experiment_data.py b/test/database_service/test_db_experiment_data.py index 6d6713248e..003d21af0f 100644 --- a/test/database_service/test_db_experiment_data.py +++ b/test/database_service/test_db_experiment_data.py @@ -14,6 +14,7 @@ """Test ExperimentData.""" from test.base import QiskitExperimentsTestCase +from test.fake_experiment import FakeExperiment import os from unittest import mock import copy @@ -46,6 +47,7 @@ ExperimentStatus, ) from qiskit_experiments.framework.matplotlib import get_non_gui_ax +from qiskit_experiments.test.fake_backend import FakeBackend class TestDbExperimentData(QiskitExperimentsTestCase): @@ -107,8 +109,8 @@ def test_add_data_result(self): def test_add_data_result_metadata(self): """Test add result metadata.""" exp_data = ExperimentData(backend=self.backend, experiment_type="qiskit_test") - result1 = self._get_job_result(1, has_metadata=False) - result2 = self._get_job_result(1, has_metadata=True) + result1 = self._get_job_result(1, no_metadata=True) + result2 = self._get_job_result(1) exp_data.add_data(result1) exp_data.add_data(result2) @@ -119,12 +121,14 @@ def test_add_data_job(self): """Test add job data.""" a_job = mock.create_autospec(Job, instance=True) a_job.result.return_value = self._get_job_result(3) + num_circs = 3 jobs = [] for _ in range(2): job = mock.create_autospec(Job, instance=True) - job.result.return_value = self._get_job_result(2) + job.result.return_value = self._get_job_result(2, label_from=num_circs) job.status.return_value = JobStatus.DONE jobs.append(job) + num_circs = num_circs + 2 expected 
= a_job.result().get_counts() for job in jobs: @@ -135,7 +139,13 @@ def test_add_data_job(self): self.assertExperimentDone(exp_data) exp_data.add_jobs(jobs) self.assertExperimentDone(exp_data) - self.assertEqual(expected, [sdata["counts"] for sdata in exp_data.data()]) + self.assertEqual( + expected, + [ + sdata["counts"] + for sdata in sorted(exp_data.data(), key=lambda x: x["metadata"]["label"]) + ], + ) self.assertIn(a_job.job_id(), exp_data.job_ids) def test_add_data_job_callback(self): @@ -401,6 +411,8 @@ def test_get_figure(self): exp_data = ExperimentData(experiment_type="qiskit_test") figure_template = "hello world {}" name_template = "figure_{}.svg" + name_template_wo_ext = "figure_{}" + for idx in range(3): exp_data.add_figures( str.encode(figure_template.format(idx)), figure_names=name_template.format(idx) @@ -410,6 +422,11 @@ def test_get_figure(self): self.assertEqual(expected_figure, exp_data.figure(name_template.format(idx)).figure) self.assertEqual(expected_figure, exp_data.figure(idx).figure) + # Check that figure will be returned without file extension in name + expected_figure = str.encode(figure_template.format(idx)) + self.assertEqual(expected_figure, exp_data.figure(name_template_wo_ext.format(idx)).figure) + self.assertEqual(expected_figure, exp_data.figure(idx).figure) + file_name = uuid.uuid4().hex self.addCleanup(os.remove, file_name) exp_data.figure(idx, file_name) @@ -1029,14 +1046,12 @@ def test_additional_attr(self): def test_copy_metadata(self): """Test copy metadata.""" - exp_data = ExperimentData(experiment_type="qiskit_test") + exp_data = FakeExperiment(experiment_type="qiskit_test").run(backend=FakeBackend()) exp_data.add_data(self._get_job_result(1)) - result = mock.MagicMock() - result.result_id = str(uuid.uuid4()) - exp_data.add_analysis_results(result) copied = exp_data.copy(copy_results=False) self.assertEqual(exp_data.data(), copied.data()) self.assertFalse(copied.analysis_results()) + self.assertEqual(exp_data.provider, copied.provider) def test_copy_metadata_pending_job(self): """Test copy metadata with a pending job.""" @@ -1073,7 +1088,7 @@ def _job2_result(): exp_data.data(0)["counts"], [copied.data(0)["counts"], copied.data(1)["counts"]] ) - def _get_job_result(self, circ_count, has_metadata=False): + def _get_job_result(self, circ_count, label_from=0, no_metadata=False): """Return a job result with random counts.""" job_result = { "backend_name": BackendData(self.backend).name, @@ -1085,12 +1100,12 @@ def _get_job_result(self, circ_count, has_metadata=False): } circ_result_template = {"shots": 1024, "success": True, "data": {}} - for _ in range(circ_count): + for i_circ in range(circ_count): counts = randrange(1024) circ_result = copy.copy(circ_result_template) circ_result["data"] = {"counts": {"0x0": counts, "0x3": 1024 - counts}} - if has_metadata: - circ_result["header"] = {"metadata": {"meas_basis": "pauli"}} + if not no_metadata: + circ_result["header"] = {"metadata": {"label": label_from + i_circ}} job_result["results"].append(circ_result) return Result.from_dict(job_result) diff --git a/test/extended_equality.py b/test/extended_equality.py index 751763b8ee..1f210e0ec5 100644 --- a/test/extended_equality.py +++ b/test/extended_equality.py @@ -28,7 +28,6 @@ from qiskit_experiments.curve_analysis.curve_data import CurveFitResult from qiskit_experiments.data_processing import DataAction, DataProcessor from qiskit_experiments.database_service.utils import ( - ThreadSafeDataFrame, ThreadSafeList, ThreadSafeOrderedDict, ) @@ -37,6 +36,7 @@ 
BaseExperiment,
     BaseAnalysis,
     AnalysisResult,
+    AnalysisResultTable,
 )
 from qiskit_experiments.visualization import BaseDrawer
 
@@ -276,15 +276,11 @@
 @_is_equivalent_dispatcher.register
 def _check_dataframes(
-    data1: Union[pd.DataFrame, ThreadSafeDataFrame],
-    data2: Union[pd.DataFrame, ThreadSafeDataFrame],
+    data1: pd.DataFrame,
+    data2: pd.DataFrame,
     **kwargs,
 ):
     """Check equality of data frame which may involve Qiskit Experiments class value."""
-    if isinstance(data1, ThreadSafeDataFrame):
-        data1 = data1.container(collapse_extra=False)
-    if isinstance(data2, ThreadSafeDataFrame):
-        data2 = data2.container(collapse_extra=False)
     return is_equivalent(
         data1.to_dict(orient="index"),
         data2.to_dict(orient="index"),
@@ -292,6 +288,20 @@
 )
 
 
+@_is_equivalent_dispatcher.register
+def _check_result_table(
+    data1: AnalysisResultTable,
+    data2: AnalysisResultTable,
+    **kwargs,
+):
+    """Check equality of analysis result tables which may involve Qiskit Experiments class values."""
+    return is_equivalent(
+        data1.copy().to_dict(orient="index"),
+        data2.copy().to_dict(orient="index"),
+        **kwargs,
+    )
+
+
 @_is_equivalent_dispatcher.register
 def _check_experiment_data(
     data1: ExperimentData,
diff --git a/test/fake_experiment.py b/test/fake_experiment.py
index 5e4a2fa060..f904a7f4c4 100644
--- a/test/fake_experiment.py
+++ b/test/fake_experiment.py
@@ -48,11 +48,16 @@ def _default_experiment_options(cls) -> Options:
         options.dummyoption = None
         return options
 
-    def __init__(self, physical_qubits=None):
+    def __init__(self, physical_qubits=None, backend=None, experiment_type=None):
         """Initialise the fake experiment."""
         if physical_qubits is None:
             physical_qubits = [0]
-        super().__init__(physical_qubits, analysis=FakeAnalysis())
+        super().__init__(
+            physical_qubits,
+            analysis=FakeAnalysis(),
+            backend=backend,
+            experiment_type=experiment_type,
+        )
 
     def circuits(self):
         """Fake circuits."""
diff --git a/test/framework/test_backend_timing.py b/test/framework/test_backend_timing.py
index 5423106e63..c270795069 100644
--- a/test/framework/test_backend_timing.py
+++ b/test/framework/test_backend_timing.py
@@ -18,7 +18,7 @@
 from qiskit import QiskitError
 from qiskit.providers.fake_provider import FakeNairobiV2
 
-from qiskit_experiments.framework import BackendData, BackendTiming
+from qiskit_experiments.framework import BackendTiming
 
 
 @ddt
@@ -30,16 +30,13 @@ def setUpClass(cls):
         super().setUpClass()
 
         # Creating a complete fake backend is difficult so we use one from
-        # terra. Just to be safe, we check that the properties we care about
+        # qiskit. Just to be safe, we check that the properties we care about
         # for these tests are never changed from what the tests assume.
         backend = FakeNairobiV2()
-        # Using BackendData to handle acquire/aquire rename.
Can replace with - # target.acquire_alignment when testing against terra >=0.24 - backend_data = BackendData(backend) target = backend.target assumptions = ( (abs(target.dt * 4.5e9 - 1) < 1e-6) - and backend_data.acquire_alignment == 16 + and target.acquire_alignment == 16 and target.pulse_alignment == 1 and target.min_length == 64 and target.granularity == 16 @@ -47,7 +44,7 @@ def setUpClass(cls): if not assumptions: # pragma: no cover raise ValueError("FakeNairobiV2 properties have changed!") - cls.acquire_alignment = backend_data.acquire_alignment + cls.acquire_alignment = target.acquire_alignment cls.dt = target.dt cls.granularity = target.granularity cls.min_length = target.min_length diff --git a/test/framework/test_composite.py b/test/framework/test_composite.py index fc0d126b2f..b006d9f7c0 100644 --- a/test/framework/test_composite.py +++ b/test/framework/test_composite.py @@ -151,6 +151,21 @@ def test_roundtrip_serializable(self): self.assertRoundTripSerializable(exp) + def test_experiment_type(self): + """Test experiment_type setter.""" + + exp1 = FakeExperiment([0]) + + par_exp1 = ParallelExperiment([exp1], flatten_results=False) + batch_exp1 = BatchExperiment([exp1], flatten_results=False) + self.assertEqual(par_exp1.experiment_type, "ParallelExperiment") + self.assertEqual(batch_exp1.experiment_type, "BatchExperiment") + + par_exp2 = ParallelExperiment([exp1], flatten_results=False, experiment_type="yooo") + batch_exp2 = BatchExperiment([exp1], flatten_results=False, experiment_type="blaaa") + self.assertEqual(par_exp2.experiment_type, "yooo") + self.assertEqual(batch_exp2.experiment_type, "blaaa") + @ddt class TestCompositeExperimentData(QiskitExperimentsTestCase): @@ -624,6 +639,49 @@ def _default_options(cls): self.assertEqual(par_exp.analysis.component_analysis(0).options.option1, opt1_val) self.assertEqual(par_exp.analysis.component_analysis(1).options.option2, opt2_val) + def test_composite_analysis_options_cascade(self): + """Test setting component analysis options""" + + class Analysis(FakeAnalysis): + """Fake analysis class with options""" + + @classmethod + def _default_options(cls): + opts = super()._default_options() + opts.option1 = None + return opts + + exp1 = FakeExperiment([0]) + exp1.analysis = Analysis() + exp2 = FakeExperiment([1]) + exp2.analysis = Analysis() + par_exp1 = ParallelExperiment([exp1, exp2], flatten_results=True) + + exp3 = FakeExperiment([0]) + exp3.analysis = Analysis() + exp4 = FakeExperiment([1]) + exp4.analysis = Analysis() + par_exp2 = ParallelExperiment([exp3, exp4], flatten_results=True) + + # Set a batch experiment + batch_exp = BatchExperiment([par_exp1, par_exp2], flatten_results=True) + + # Set new option to the experiment + exp_list = [exp1, exp2, exp3, exp4] + opt1_vals = [9000, 8000, 7000, 6000] + for exp, opt1_val in zip(exp_list, opt1_vals): + exp.analysis.set_options(option1=opt1_val) + + opt1_new_val = 1000 + batch_exp.analysis.set_options(option1=opt1_new_val, broadcast=False) + + for exp in exp_list: + self.assertNotEqual(exp.analysis.options.option1, opt1_new_val) + + batch_exp.analysis.set_options(option1=opt1_new_val, broadcast=True) + for exp in exp_list: + self.assertEqual(exp.analysis.options.option1, opt1_new_val) + @data( ["0x0", "0x2", "0x3", "0x0", "0x0", "0x1", "0x3", "0x0", "0x2", "0x3"], ["00", "10", "11", "00", "00", "01", "11", "00", "10", "11"], diff --git a/test/framework/test_data_table.py b/test/framework/test_data_table.py index 3afead20f3..a1e34e7a1f 100644 --- a/test/framework/test_data_table.py +++ 
b/test/framework/test_data_table.py @@ -15,11 +15,10 @@ from test.base import QiskitExperimentsTestCase import uuid -import numpy as np import pandas as pd -from qiskit_experiments.database_service.utils import ThreadSafeDataFrame from qiskit_experiments.framework.analysis_result_table import AnalysisResultTable +from qiskit_experiments.framework.table_mixin import DefaultColumnsMixIn def _callable_thread_local_add_entry(args, thread_table): @@ -29,9 +28,9 @@ def _callable_thread_local_add_entry(args, thread_table): class TestBaseTable(QiskitExperimentsTestCase): - """Test case for data frame base class.""" + """Test case for default columns mix-in.""" - class TestTable(ThreadSafeDataFrame): + class TestTable(pd.DataFrame, DefaultColumnsMixIn): """A table class under test with test columns.""" @classmethod @@ -39,53 +38,36 @@ def _default_columns(cls): return ["value1", "value2", "value3"] def test_initializing_with_dict(self): - """Test initializing table with dictionary. Columns are filled with default.""" - table = TestBaseTable.TestTable( + """Test initializing table with dictionary.""" + table = TestBaseTable.TestTable.from_dict( { - "x": [1.0, 2.0, 3.0], - "y": [4.0, 5.0, 6.0], - } - ) - self.assertListEqual(table.get_columns(), ["value1", "value2", "value3"]) - - def test_raises_initializing_with_wrong_table(self): - """Test table cannot be initialized with non-default columns.""" - wrong_table = pd.DataFrame.from_dict( - data={"x": [1.0, 2.0], "y": [3.0, 4.0], "z": [5.0, 6.0]}, + "x": {"value1": 1.0, "value2": 2.0, "value3": 3.0}, + "y": {"value1": 4.0, "value2": 5.0, "value3": 6.0}, + }, orient="index", - columns=["wrong", "columns"], ) - with self.assertRaises(ValueError): - # columns doesn't match with default_columns - TestBaseTable.TestTable(wrong_table) - - def test_get_entry(self): - """Test getting an entry from the table.""" - table = TestBaseTable.TestTable({"x": [1.0, 2.0, 3.0]}) - self.assertListEqual(table.get_entry("x").to_list(), [1.0, 2.0, 3.0]) + self.assertListEqual(list(table.columns), ["value1", "value2", "value3"]) def test_add_entry(self): """Test adding data with default keys to table.""" table = TestBaseTable.TestTable() table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0) - self.assertListEqual(table.get_entry("x").to_list(), [0.0, 1.0, 2.0]) + self.assertListEqual(table.loc["x"].to_list(), [0.0, 1.0, 2.0]) def test_add_entry_with_missing_key(self): """Test adding entry with partly specified keys.""" table = TestBaseTable.TestTable() table.add_entry(index="x", value1=0.0, value3=2.0) - - # NaN value cannot be compared with assert - np.testing.assert_equal(table.get_entry("x").to_list(), [0.0, float("nan"), 2.0]) + self.assertListEqual(table.loc["x"].to_list(), [0.0, None, 2.0]) def test_add_entry_with_new_key(self): """Test adding data with new keys to table.""" table = TestBaseTable.TestTable() table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0, extra=3.0) - self.assertListEqual(table.get_columns(), ["value1", "value2", "value3", "extra"]) - self.assertListEqual(table.get_entry("x").to_list(), [0.0, 1.0, 2.0, 3.0]) + self.assertListEqual(list(table.columns), ["value1", "value2", "value3", "extra"]) + self.assertListEqual(table.loc["x"].to_list(), [0.0, 1.0, 2.0, 3.0]) def test_add_entry_with_multiple_new_keys(self): """Test new keys are added to column and the key order is preserved.""" @@ -93,93 +75,91 @@ def test_add_entry_with_multiple_new_keys(self): table.add_entry(index="x", phi=0.1, lamb=0.2, theta=0.3) self.assertListEqual( - 
table.get_columns(),
-            ["value1", "value2", "value3", "phi", "lamb", "theta"],
+            list(table.columns), ["value1", "value2", "value3", "phi", "lamb", "theta"]
         )
 
-    def test_add_entry_with_new_key_with_existing_entry(self):
-        """Test adding new key will expand existing entry."""
+    def test_dtype_missing_value_is_none(self):
+        """Test that a missing value is always None.
+
+        Data frames implicitly convert None into NaN for numeric containers.
+        This should not happen.
+        """
+        table = TestBaseTable.TestTable()
+        table.add_entry(index="x", value1=1.0)
+        table.add_entry(index="y", value2=1.0)
+
+        self.assertEqual(table.loc["x", "value2"], None)
+        self.assertEqual(table.loc["y", "value1"], None)
+
+    def test_dtype_adding_extra_later(self):
+        """Test that adding a new row later with a numeric value doesn't change None to NaN."""
+        table = TestBaseTable.TestTable()
+        table.add_entry(index="x")
+        table.add_entry(index="y", extra=1.0)
+
+        self.assertListEqual(table.loc["x"].to_list(), [None, None, None, None])
+
+    def test_dtype_adding_null_row(self):
+        """Test that adding a new row with empty values doesn't change the dtype of the columns."""
+        table = TestBaseTable.TestTable()
+        table.add_entry(index="x", extra1=1, extra2=1.0, extra3=True, extra4="abc")
+        table.add_entry(index="y")
+
+        self.assertIsInstance(table.loc["x", "extra1"], int)
+        self.assertIsInstance(table.loc["x", "extra2"], float)
+        self.assertIsInstance(table.loc["x", "extra3"], bool)
+        self.assertIsInstance(table.loc["x", "extra4"], str)
+
+    def test_filter_columns(self):
+        """Test filtering table with columns."""
         table = TestBaseTable.TestTable()
         table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0)
-        table.add_entry(index="y", value1=0.0, value2=1.0, value3=2.0, extra=3.0)
-        self.assertListEqual(table.get_columns(), ["value1", "value2", "value3", "extra"])
-        self.assertListEqual(table.get_entry("y").to_list(), [0.0, 1.0, 2.0, 3.0])
+        filt_table = table[["value1", "value3"]]
+        self.assertListEqual(filt_table.loc["x"].to_list(), [0.0, 2.0])
+
+
+class TestAnalysisTable(QiskitExperimentsTestCase):
+    """Test case for extra functionality of analysis table."""
 
-        # NaN value cannot be compared with assert
-        np.testing.assert_equal(table.get_entry("x").to_list(), [0.0, 1.0, 2.0, float("nan")])
+    def test_add_get_entry_with_result_id(self):
+        """Test adding entry with result_id.
Index is created by truncating long string.""" + table = AnalysisResultTable() + table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=0.123) + self.assertEqual(table.get_entry("9a0bdec8").value, 0.123) def test_drop_entry(self): """Test drop entry from the table.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0) - table.drop_entry("x") + table = AnalysisResultTable() + table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=0.123) + table.drop_entry("9a0bdec8") self.assertEqual(len(table), 0) def test_drop_non_existing_entry(self): """Test dropping non-existing entry raises ValueError.""" - table = TestBaseTable.TestTable() + table = AnalysisResultTable() with self.assertRaises(ValueError): - table.drop_entry("x") - - def test_return_only_default_columns(self): - """Test extra entry is correctly recognized.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0, extra=3.0) - - default_table = table.container(collapse_extra=True) - self.assertListEqual(default_table.loc["x"].to_list(), [0.0, 1.0, 2.0]) + table.drop_entry("9a0bdec8") def test_raises_adding_duplicated_index(self): """Test adding duplicated index should raise.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0) + table = AnalysisResultTable() + table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=0.0) with self.assertRaises(ValueError): - # index x is already used - table.add_entry(index="x", value1=3.0, value2=4.0, value3=5.0) + # index 9a0bdec8 is already used + table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=1.0) def test_clear_container(self): """Test reset table.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0) + table = AnalysisResultTable() + table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=0.0, extra=123) self.assertEqual(len(table), 1) table.clear() self.assertEqual(len(table), 0) - - def test_container_is_immutable(self): - """Test modifying container doesn't mutate the original payload.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.1, value2=0.2, value3=0.3) - - dataframe = table.container() - dataframe.at["x", "value1"] = 100 - - # Local object can be modified - self.assertListEqual(dataframe.loc["x"].to_list(), [100, 0.2, 0.3]) - - # Original object in the experiment payload is preserved - self.assertListEqual(table.get_entry("x").to_list(), [0.1, 0.2, 0.3]) - - def test_round_trip(self): - """Test JSON roundtrip serialization with the experiment encoder.""" - table = TestBaseTable.TestTable() - table.add_entry(index="x", value1=0.0, value2=1.0, value3=2.0) - table.add_entry(index="y", value1=1.0, extra=2.0) - - self.assertRoundTripSerializable(table) - - -class TestAnalysisTable(QiskitExperimentsTestCase): - """Test case for extra functionality of analysis table.""" - - def test_add_entry_with_result_id(self): - """Test adding entry with result_id. 
Index is created by truncating long string.""" - table = AnalysisResultTable() - table.add_entry(result_id="9a0bdec8-c010-4ef7-bb7d-b84939717a6b", value=0.123) - self.assertEqual(table.get_entry("9a0bdec8").value, 0.123) + self.assertListEqual(table.copy().extra_columns(), []) def test_extra_column_name_is_always_returned(self): """Test extra column names are always returned in filtered column names.""" @@ -213,3 +193,29 @@ def test_no_overlap_result_id(self): table.add_entry(value=i) self.assertEqual(len(table), 100) + + def test_round_trip(self): + """Test JSON roundtrip serialization with the experiment encoder.""" + table = AnalysisResultTable() + table.add_entry(result_id="30d5d05c-c074-4d3c-9530-07a83d48883a", name="x", value=0.0) + table.add_entry(result_id="7c305972-858d-42a0-9b5e-57162efe20a1", name="y", value=1.0) + table.add_entry(result_id="61d8d351-c0cf-4a0a-ae57-fde0f3baa00d", name="z", value=2.0) + + self.assertRoundTripSerializable(table) + + def test_round_trip_with_extra(self): + """Test JSON roundtrip serialization with extra columns containing missing value.""" + table = AnalysisResultTable() + table.add_entry( + result_id="30d5d05c-c074-4d3c-9530-07a83d48883a", + name="x", + value=0.0, + extra1=2, + ) + table.add_entry( + result_id="7c305972-858d-42a0-9b5e-57162efe20a1", + name="y", + value=1.0, + extra2=0.123, + ) + self.assertRoundTripSerializable(table) diff --git a/test/framework/test_framework.py b/test/framework/test_framework.py index 4c122f05ec..16488a38f7 100644 --- a/test/framework/test_framework.py +++ b/test/framework/test_framework.py @@ -267,3 +267,95 @@ def error_message(self): res = expdata.analysis_results() self.assertEqual(len(res), 0) self.assertEqual(expdata.analysis_status(), AnalysisStatus.CANCELLED) + + @ddt.data(None, 1, 10, 100) + def test_max_circuits(self, max_experiments): + """Test running experiment with max_circuits""" + + num_circuits = 10 + + class MyExp(BaseExperiment): + """Some arbitrary experiment""" + + def __init__(self, physical_qubits): + super().__init__(physical_qubits) + + def circuits(self): + """Generate fake circuits""" + qc = QuantumCircuit(1) + qc.measure_all() + return num_circuits * [qc] + + backend = FakeBackend(max_experiments=max_experiments) + exp = MyExp([0]) + + # set backend + if backend is None: + if exp.backend is None: + self.assertRaises(QiskitError) + backend = exp.backend + exp.backend = backend + # Get max circuits for job splitting + max_circuits_option = getattr(exp.experiment_options, "max_circuits", None) + max_circuits_backend = exp._backend_data.max_circuits + if max_circuits_option and max_circuits_backend: + result = min(max_circuits_option, max_circuits_backend) + elif max_circuits_option: + result = max_circuits_option + else: + result = max_circuits_backend + + self.assertEqual(exp._max_circuits(backend=backend), result) + + @ddt.data(None, 1, 10, 100) + def test_job_info(self, max_experiments): + """Test job_info for specific backend""" + + num_circuits = 10 + + class MyExp(BaseExperiment): + """Some arbitrary experiment""" + + def __init__(self, physical_qubits): + super().__init__(physical_qubits) + + def circuits(self): + """Generate fake circuits""" + qc = QuantumCircuit(1) + qc.measure_all() + return num_circuits * [qc] + + backend = FakeBackend(max_experiments=max_experiments) + exp = MyExp([0]) + + if max_experiments is None: + num_jobs = 1 + else: + num_jobs = (num_circuits + max_experiments - 1) // max_experiments + + job_info = { + "Total number of circuits in the experiment": 
num_circuits, + "Maximum number of circuits per job": max_experiments, + "Total number of jobs": num_jobs, + } + + self.assertEqual(exp.job_info(backend=backend), job_info) + + def test_experiment_type(self): + """Test the experiment_type setter for the experiment.""" + + class MyExp(BaseExperiment): + """Some arbitrary experiment""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def circuits(self): + pass + + exp1 = MyExp(physical_qubits=[0], experiment_type="blaaa") + self.assertEqual(exp1.experiment_type, "blaaa") + exp2 = MyExp(physical_qubits=[0]) + self.assertEqual(exp2.experiment_type, "MyExp") + exp2.experiment_type = "suieee" + self.assertEqual(exp2.experiment_type, "suieee") diff --git a/test/framework/test_store_init_args.py b/test/framework/test_store_init_args.py index df46c68a8d..87ebf90052 100644 --- a/test/framework/test_store_init_args.py +++ b/test/framework/test_store_init_args.py @@ -12,7 +12,8 @@ """Tests for base experiment framework.""" -from qiskit.test import QiskitTestCase +from test.base import QiskitExperimentsTestCase + from qiskit_experiments.framework.store_init_args import StoreInitArgs @@ -58,7 +59,7 @@ def __init__(self, a, b, c="default_c", d="default_d"): pass -class TestSettings(QiskitTestCase): +class TestSettings(QiskitExperimentsTestCase): """Test Settings mixin""" # pylint: disable = missing-function-docstring diff --git a/test/library/calibration/test_drag.py b/test/library/calibration/test_drag.py index 8a095b1e46..b66e735f3b 100644 --- a/test/library/calibration/test_drag.py +++ b/test/library/calibration/test_drag.py @@ -48,14 +48,27 @@ def setUp(self): self.x_plus = xp self.test_tol = 0.1 - # pylint: disable=no-member - def test_end_to_end(self): + @data( + (None, None, None), + (0.0044, None, None), + (0.04, np.linspace(-4, 4, 31), {"beta": 1.8, "freq": 0.08}), + ) + @unpack + def test_end_to_end(self, freq, betas, p0_opt): """Test the drag experiment end to end.""" drag_experiment_helper = DragHelper(gate_name="Drag(xp)") + if freq: + drag_experiment_helper.frequency = freq backend = MockIQBackend(drag_experiment_helper) drag = RoughDrag([1], self.x_plus) + drag.set_run_options(shots=200) + + if betas is not None: + drag.set_experiment_options(betas=betas) + if p0_opt: + drag.analysis.set_options(p0=p0_opt) expdata = drag.run(backend) self.assertExperimentDone(expdata) @@ -64,42 +77,14 @@ def test_end_to_end(self): # pylint: disable=no-member self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol) self.assertEqual(result.quality, "good") - - # Small leakage will make the curves very flat, in this case one should - # rather increase beta. - drag_experiment_helper.frequency = 0.0044 - - drag = RoughDrag([0], self.x_plus) - exp_data = drag.run(backend) - self.assertExperimentDone(exp_data) - result = exp_data.analysis_results(1) - - # pylint: disable=no-member - self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol) - self.assertEqual(result.quality, "good") - - # Large leakage will make the curves oscillate quickly. 
- drag_experiment_helper.frequency = 0.04 - drag = RoughDrag([1], self.x_plus, betas=np.linspace(-4, 4, 31)) - # pylint: disable=no-member - drag.set_run_options(shots=200) - drag.analysis.set_options(p0={"beta": 1.8, "freq": 0.08}) - exp_data = drag.run(backend) - self.assertExperimentDone(exp_data) - result = exp_data.analysis_results(1) - - meas_level = exp_data.metadata["meas_level"] - - self.assertEqual(meas_level, MeasLevel.CLASSIFIED) - self.assertTrue(abs(result.value.n - backend.experiment_helper.ideal_beta) < self.test_tol) - self.assertEqual(result.quality, "good") + self.assertEqual(expdata.metadata["meas_level"], MeasLevel.CLASSIFIED) @data( - (0.0040, 1.0, 0.00, [1, 3, 5], None, 0.1), # partial oscillation. - (0.0020, 0.5, 0.00, [1, 3, 5], None, 0.5), # even slower oscillation with amp < 1 - (0.0040, 0.8, 0.05, [3, 5, 7], None, 0.1), # constant offset, i.e. lower SNR. - (0.0800, 0.9, 0.05, [1, 3, 5], np.linspace(-1, 1, 51), 0.1), # Beta not in range - (0.2000, 0.5, 0.10, [1, 3, 5], np.linspace(-2.5, 2.5, 51), 0.1), # Max closer to zero + (0.0040, 1.0, 0.00, [1, 3, 5], None, 0.2), # partial oscillation. + (0.0020, 0.5, 0.00, [1, 3, 5], None, 1.0), # even slower oscillation with amp < 1 + (0.0040, 0.8, 0.05, [3, 5, 7], None, 0.2), # constant offset, i.e. lower SNR. + (0.0800, 0.9, 0.05, [1, 3, 5], np.linspace(-1, 1, 51), 0.2), # Beta not in range + (0.2000, 0.5, 0.10, [1, 3, 5], np.linspace(-2.5, 2.5, 51), 0.2), # Max closer to zero ) @unpack def test_nasty_data(self, freq, amp, offset, reps, betas, tol): @@ -113,6 +98,7 @@ def test_nasty_data(self, freq, amp, offset, reps, betas, tol): drag = RoughDrag([0], self.x_plus, betas=betas) drag.set_experiment_options(reps=reps) + drag.set_run_options(shots=500) exp_data = drag.run(backend) self.assertExperimentDone(exp_data) @@ -190,7 +176,7 @@ def test_default_circuits(self): def test_circuit_roundtrip_serializable(self): """Test circuit serializations for drag experiment.""" drag = RoughDrag([0], self.x_plus) - drag.set_experiment_options(reps=[2, 4, 8]) + drag.set_experiment_options(reps=[2, 4], betas=[-5, 5]) drag.backend = FakeWashingtonV2() self.assertRoundTripSerializable(drag._transpiled_circuits()) diff --git a/test/library/calibration/test_half_angle.py b/test/library/calibration/test_half_angle.py index 40ae7b655d..7e6d704636 100644 --- a/test/library/calibration/test_half_angle.py +++ b/test/library/calibration/test_half_angle.py @@ -32,7 +32,7 @@ def setUp(self): super().setUp() library = FixedFrequencyTransmon() - self.backend = SingleTransmonTestBackend(noise=False) + self.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) self.cals = Calibrations.from_backend(self.backend, libraries=[library]) def test_amp_parameter_error(self): diff --git a/test/library/calibration/test_rabi.py b/test/library/calibration/test_rabi.py index 48d471c314..df05f30d7f 100644 --- a/test/library/calibration/test_rabi.py +++ b/test/library/calibration/test_rabi.py @@ -34,25 +34,27 @@ class TestRabiEndToEnd(QiskitExperimentsTestCase): """Test the rabi experiment.""" - def setUp(self): + @classmethod + def setUpClass(cls): """Setup the tests.""" - super().setUp() + super().setUpClass() - self.qubit = 0 + cls.qubit = 0 with pulse.build(name="x") as sched: - pulse.play(pulse.Drag(160, Parameter("amp"), 40, 0.4), pulse.DriveChannel(self.qubit)) + pulse.play(pulse.Drag(160, Parameter("amp"), 40, 0.4), pulse.DriveChannel(cls.qubit)) - self.sched = sched - self.backend = SingleTransmonTestBackend(noise=False) + cls.sched = sched + 
cls.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) # pylint: disable=no-member def test_rabi_end_to_end(self): """Test the Rabi experiment end to end.""" - test_tol = 0.015 + test_tol = 0.15 rabi = Rabi([self.qubit], self.sched, backend=self.backend) + rabi.set_run_options(shots=200) rabi.set_experiment_options(amplitudes=np.linspace(-0.1, 0.1, 21)) expdata = rabi.run() self.assertExperimentDone(expdata) @@ -100,7 +102,7 @@ def setUp(self): super().setUp() self.qubit = 0 - self.backend = SingleTransmonTestBackend(noise=False) + self.backend = SingleTransmonTestBackend(noise=False, atol=1e-4) self.anharmonicity = self.backend.anharmonicity with pulse.build(name="x") as sched: with pulse.frequency_offset(self.anharmonicity, pulse.DriveChannel(self.qubit)): @@ -114,7 +116,7 @@ def setUp(self): def test_ef_rabi_end_to_end(self): """Test the EFRabi experiment end to end.""" - test_tol = 0.01 + test_tol = 0.05 # Note that the backend is not sophisticated enough to simulate an e-f # transition so we run the test with a tiny frequency shift, still driving the e-g transition. diff --git a/test/library/calibration/test_ramsey_xy.py b/test/library/calibration/test_ramsey_xy.py index de167b9f71..76bd953211 100644 --- a/test/library/calibration/test_ramsey_xy.py +++ b/test/library/calibration/test_ramsey_xy.py @@ -56,7 +56,7 @@ def test_end_to_end(self, freq_shift: float): This test also checks that we can pickup frequency shifts with different signs. """ - test_tol = 0.01 + test_tol = 0.03 abs_tol = max(1e3, abs(freq_shift) * test_tol) exp_helper = RamseyXYHelper() diff --git a/test/library/calibration/test_rough_amplitude.py b/test/library/calibration/test_rough_amplitude.py index f916ee29db..239045d39b 100644 --- a/test/library/calibration/test_rough_amplitude.py +++ b/test/library/calibration/test_rough_amplitude.py @@ -32,7 +32,7 @@ def setUp(self): super().setUp() library = FixedFrequencyTransmon() - self.backend = SingleTransmonTestBackend(noise=False) + self.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) self.cals = Calibrations.from_backend(self.backend, libraries=[library]) def test_circuits(self): @@ -84,7 +84,7 @@ def test_update(self): def test_circuit_roundtrip_serializable(self): """Test round trip JSON serialization""" - test_amps = [-0.5, 0, 0.5] + test_amps = [-0.5, 0] rabi = RoughXSXAmplitudeCal([0], self.cals, amplitudes=test_amps, backend=self.backend) self.assertRoundTripSerializable(rabi._transpiled_circuits()) @@ -100,14 +100,15 @@ def test_experiment_config(self): class TestSpecializations(QiskitExperimentsTestCase): """Test the specialized versions of the calibration.""" - def setUp(self): + @classmethod + def setUpClass(cls): """Setup the tests""" - super().setUp() + super().setUpClass() library = FixedFrequencyTransmon() - self.backend = SingleTransmonTestBackend(noise=False) - self.cals = Calibrations.from_backend(self.backend, libraries=[library]) + cls.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) + cls.cals = Calibrations.from_backend(cls.backend, libraries=[library]) # Add some pulses on the 1-2 transition. 
d0 = pulse.DriveChannel(0) @@ -119,12 +120,12 @@ def setUp(self): with pulse.frequency_offset(-300e6, d0): pulse.play(pulse.Drag(Parameter("duration"), Parameter("amp"), 40, 0.0), d0) - self.cals.add_schedule(x12, 0) - self.cals.add_schedule(sx12, 0) - self.cals.add_parameter_value(0.4, "amp", 0, "x12") - self.cals.add_parameter_value(0.2, "amp", 0, "sx12") - self.cals.add_parameter_value(160, "duration", 0, "x12") - self.cals.add_parameter_value(160, "duration", 0, "sx12") + cls.cals.add_schedule(x12, 0) + cls.cals.add_schedule(sx12, 0) + cls.cals.add_parameter_value(0.4, "amp", 0, "x12") + cls.cals.add_parameter_value(0.2, "amp", 0, "sx12") + cls.cals.add_parameter_value(160, "duration", 0, "x12") + cls.cals.add_parameter_value(160, "duration", 0, "sx12") def test_ef_circuits(self): """Test that we get the expected circuits with calibrations for the EF experiment.""" @@ -153,12 +154,13 @@ def test_ef_circuits(self): def test_ef_update(self): """Test that we properly update the pulses on the 1<->2 transition.""" - tol = 0.01 - default_amp = 0.5 / self.backend.rabi_rate_12[0] + tol = 0.05 + default_amp = 0.5 / self.backend.rabi_rate_12 rabi_ef = EFRoughXSXAmplitudeCal( [0], self.cals, amplitudes=np.linspace(-0.1, 0.1, 11), backend=self.backend ) + rabi_ef.set_run_options(shots=200) expdata = rabi_ef.run() self.assertExperimentDone(expdata) @@ -172,6 +174,7 @@ def test_ef_update(self): self.cals.add_parameter_value(int(4 * 160 / 5), "duration", 0, "x12") self.cals.add_parameter_value(int(4 * 160 / 5), "duration", 0, "sx12") rabi_ef = EFRoughXSXAmplitudeCal([0], self.cals, amplitudes=np.linspace(-0.1, 0.1, 11)) + rabi_ef.set_run_options(shots=200) expdata = rabi_ef.run(self.backend) self.assertExperimentDone(expdata) diff --git a/test/library/calibration/test_rough_frequency.py b/test/library/calibration/test_rough_frequency.py index 3b32ad3d26..930cce1814 100644 --- a/test/library/calibration/test_rough_frequency.py +++ b/test/library/calibration/test_rough_frequency.py @@ -28,7 +28,7 @@ class TestRoughFrequency(QiskitExperimentsTestCase): def setUp(self): """Setup the tests.""" super().setUp() - self.backend = SingleTransmonTestBackend(noise=False) + self.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) def test_init(self): """Test that initialization.""" @@ -53,7 +53,9 @@ def test_update_calibrations(self): freq01 = BackendData(self.backend).drive_freqs[0] - backend_5mhz = SingleTransmonTestBackend(qubit_frequency=freq01 + 5e6, noise=False) + backend_5mhz = SingleTransmonTestBackend( + qubit_frequency=freq01 + 5e6, noise=False, atol=1e-3 + ) library = FixedFrequencyTransmon() cals = Calibrations.from_backend(self.backend, libraries=[library]) diff --git a/test/library/characterization/test_cross_resonance_hamiltonian.py b/test/library/characterization/test_cross_resonance_hamiltonian.py index 34a2637330..540c0e0dc4 100644 --- a/test/library/characterization/test_cross_resonance_hamiltonian.py +++ b/test/library/characterization/test_cross_resonance_hamiltonian.py @@ -21,7 +21,13 @@ from ddt import ddt, data, unpack from qiskit import QuantumCircuit, pulse, qpy, quantum_info as qi from qiskit.providers.fake_provider import FakeBogotaV2 -from qiskit.extensions.hamiltonian_gate import HamiltonianGate + +# TODO: remove old path after we stop supporting the relevant version of Qiskit +try: + from qiskit.circuit.library.hamiltonian_gate import HamiltonianGate +except ModuleNotFoundError: + from qiskit.extensions.hamiltonian_gate import HamiltonianGate + from qiskit_aer import 
AerSimulator from qiskit_experiments.library.characterization import cr_hamiltonian @@ -192,7 +198,7 @@ def test_integration(self, ix, iy, iz, zx, zy, zz): expr = cr_hamiltonian.CrossResonanceHamiltonian( physical_qubits=(0, 1), sigma=sigma, - # A hack to avoild local function in pickle, i.e. in transpile. + # A hack to avoid local function in pickle, i.e. in transpile. cr_gate=functools.partial( SimulatableCRGate, hamiltonian=hamiltonian, sigma=sigma, dt=dt ), diff --git a/test/library/characterization/test_half_angle.py b/test/library/characterization/test_half_angle.py index 3f2a75e47b..e90c6b6d96 100644 --- a/test/library/characterization/test_half_angle.py +++ b/test/library/characterization/test_half_angle.py @@ -44,12 +44,12 @@ def test_end_to_end(self): self.assertTrue(abs(d_theta - error) < tol) def test_circuits(self): - """Test that transpiling works and that we can have a y gate with a calibration.""" + """Test that transpiling works.""" qubit = 1 inst_map = InstructionScheduleMap() - for inst in ["sx", "y"]: + for inst in ["sx", "x"]: inst_map.add(inst, (qubit,), pulse.Schedule(name=inst)) hac = HalfAngle([qubit]) @@ -64,8 +64,8 @@ def test_circuits(self): self.assertEqual(circ.count_ops()["sx"], idx * 2 + 2) self.assertEqual(circ.calibrations["sx"][((qubit,), ())], pulse.Schedule(name="sx")) if idx > 0: - self.assertEqual(circ.count_ops()["y"], idx) - self.assertEqual(circ.calibrations["y"][((qubit,), ())], pulse.Schedule(name="y")) + self.assertEqual(circ.count_ops()["x"], idx) + self.assertEqual(circ.calibrations["x"][((qubit,), ())], pulse.Schedule(name="x")) def test_experiment_config(self): """Test converting to and from config works""" diff --git a/test/library/characterization/test_multi_state_discrimination.py b/test/library/characterization/test_multi_state_discrimination.py index e2ed633f08..36990bb884 100644 --- a/test/library/characterization/test_multi_state_discrimination.py +++ b/test/library/characterization/test_multi_state_discrimination.py @@ -49,7 +49,7 @@ def setUp(self): """Setup test variables.""" super().setUp() - self.backend = SingleTransmonTestBackend(noise=False) + self.backend = SingleTransmonTestBackend(noise=False, atol=1e-3) # Build x12 schedule self.qubit = 0 @@ -63,12 +63,9 @@ def setUp(self): amp_x = pulse_x.amp dur_x = pulse_x.duration sigma_x = pulse_x.sigma - beta_x = pulse_x.beta with pulse.build(name="x12") as x12: pulse.shift_frequency(anharm, d0) - pulse.play( - pulse.Gaussian(dur_x, amp_x * self.backend.rabi_rate_12[0], sigma_x, beta_x), d0 - ) + pulse.play(pulse.Gaussian(dur_x, amp_x * self.backend.rabi_rate_12, sigma_x), d0) pulse.shift_frequency(-anharm, d0) self.schedules = {"x12": x12} @@ -97,7 +94,7 @@ def test_discrimination_analysis(self, n_states): fidelity = exp_data.analysis_results("fidelity").value - self.assertGreaterEqual(fidelity, 0.96) + self.assertGreaterEqual(fidelity, 0.93) # check that the discriminator differentiates n different states discrim_lbls = exp_data.analysis_results("discriminator_config").value["attributes"][ diff --git a/test/library/characterization/test_qubit_spectroscopy.py b/test/library/characterization/test_qubit_spectroscopy.py index d3522c4a7f..12ae08a4a5 100644 --- a/test/library/characterization/test_qubit_spectroscopy.py +++ b/test/library/characterization/test_qubit_spectroscopy.py @@ -160,7 +160,7 @@ def test_experiment_config(self): def test_roundtrip_serializable(self): """Test round trip JSON serialization""" - exp = QubitSpectroscopy([1], np.linspace(int(100e6), int(150e6), 
int(20e6))) + exp = QubitSpectroscopy([1], np.linspace(int(100e6), int(150e6), 4)) # Checking serialization of the experiment self.assertRoundTripSerializable(exp) @@ -270,7 +270,9 @@ def test_parallel_experiment(self): par_experiment = ParallelExperiment( exp_list, flatten_results=False, backend=parallel_backend ) - par_experiment.set_run_options(meas_level=MeasLevel.KERNELED, meas_return="single") + par_experiment.set_run_options( + meas_level=MeasLevel.KERNELED, meas_return="single", shots=20 + ) par_data = par_experiment.run() self.assertExperimentDone(par_data) @@ -288,7 +290,7 @@ def test_circuit_roundtrip_serializable(self): backend = FakeWashingtonV2() qubit = 1 freq01 = BackendData(backend).drive_freqs[qubit] - frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 21) + frequencies = np.linspace(freq01 - 10.0e6, freq01 + 10.0e6, 3) exp = QubitSpectroscopy([1], frequencies, backend=backend) # Checking serialization of the experiment self.assertRoundTripSerializable(exp._transpiled_circuits()) diff --git a/test/library/characterization/test_readout_angle.py b/test/library/characterization/test_readout_angle.py index 1413006e13..a26ffd4e2b 100644 --- a/test/library/characterization/test_readout_angle.py +++ b/test/library/characterization/test_readout_angle.py @@ -36,7 +36,7 @@ def test_readout_angle_end2end(self): MockIQReadoutAngleHelper(iq_cluster_centers=[((-3.0, 3.0), (5.0, 5.0))]), ) exp = ReadoutAngle([0]) - expdata = exp.run(backend, shots=100000) + expdata = exp.run(backend, shots=10000) self.assertExperimentDone(expdata) res = expdata.analysis_results(0) self.assertAlmostEqual(res.value % (2 * np.pi), np.pi / 2, places=2) @@ -45,7 +45,7 @@ def test_readout_angle_end2end(self): MockIQReadoutAngleHelper(iq_cluster_centers=[((0, -3.0), (5.0, 5.0))]), ) exp = ReadoutAngle([0]) - expdata = exp.run(backend, shots=100000) + expdata = exp.run(backend, shots=10000) self.assertExperimentDone(expdata) res = expdata.analysis_results(0) self.assertAlmostEqual(res.value % (2 * np.pi), 15 * np.pi / 8, places=2) diff --git a/test/library/characterization/test_resonator_spectroscopy.py b/test/library/characterization/test_resonator_spectroscopy.py index ef7d4ca406..cbbb110292 100644 --- a/test/library/characterization/test_resonator_spectroscopy.py +++ b/test/library/characterization/test_resonator_spectroscopy.py @@ -134,14 +134,14 @@ def test_end_to_end(self, freq_shift): def test_experiment_config(self): """Test converting to and from config works""" - exp = ResonatorSpectroscopy([1], frequencies=np.linspace(100, 150, 20) * 1e6) + exp = ResonatorSpectroscopy([1], frequencies=np.linspace(100, 150, 4) * 1e6) loaded_exp = ResonatorSpectroscopy.from_config(exp.config()) self.assertNotEqual(exp, loaded_exp) self.assertEqualExtended(exp, loaded_exp) def test_roundtrip_serializable(self): """Test round trip JSON serialization""" - exp = ResonatorSpectroscopy([1], frequencies=np.linspace(int(100e6), int(150e6), int(20e6))) + exp = ResonatorSpectroscopy([1], frequencies=np.linspace(int(100e6), int(150e6), 4)) self.assertRoundTripSerializable(exp) def test_circuit_roundtrip_serializable(self): @@ -158,7 +158,7 @@ def test_circuit_roundtrip_serializable(self): ), ) res_freq = BackendData(backend).meas_freqs[qubit] - frequencies = np.linspace(res_freq - 20e6, res_freq + 20e6, 51) + frequencies = np.linspace(res_freq - 20e6, res_freq + 20e6, 3) exp = ResonatorSpectroscopy([qubit], backend=backend, frequencies=frequencies) self.assertRoundTripSerializable(exp._transpiled_circuits()) @@ -177,7 
+177,7 @@ def test_kerneled_expdata_serialization(self, freq_shift): res_freq = BackendData(backend).meas_freqs[qubit] - frequencies = np.linspace(res_freq - 20e6, res_freq + 20e6, 51) + frequencies = np.linspace(res_freq - 20e6, res_freq + 20e6, 11) exp = ResonatorSpectroscopy([qubit], backend=backend, frequencies=frequencies) expdata = exp.run(backend) @@ -226,8 +226,8 @@ def test_parallel_experiment(self): res_freq1 = backend_data.meas_freqs[qubit1] res_freq2 = backend_data.meas_freqs[qubit2] - frequencies1 = np.linspace(res_freq1 - 20e6, res_freq1 + 20e6, 51) - frequencies2 = np.linspace(res_freq2 - 20e6, res_freq2 + 20e6, 53) + frequencies1 = np.linspace(res_freq1 - 20e6, res_freq1 + 20e6, 11) + frequencies2 = np.linspace(res_freq2 - 20e6, res_freq2 + 20e6, 13) res_spect1 = ResonatorSpectroscopy( [qubit1], backend=parallel_backend, frequencies=frequencies1 diff --git a/test/library/characterization/test_t1.py b/test/library/characterization/test_t1.py index 7a275d3c31..7c60e96ddd 100644 --- a/test/library/characterization/test_t1.py +++ b/test/library/characterization/test_t1.py @@ -190,37 +190,6 @@ def test_t1_parallel_measurement_level_1(self): self.assertEqual(sub_res.quality, "good") self.assertAlmostEqual(sub_res.value.n, t1[qb], delta=3) - def test_t1_parallel_different_analysis_options(self): - """ - Test parallel experiments of T1 using a simulator, for the case where - the sub-experiments have different analysis options - """ - - t1 = [25, 25] - t2 = [value / 2 for value in t1] - - backend = NoisyDelayAerBackend(t1, t2) - - delays = list(range(1, 40, 3)) - - exp0 = T1([0], delays) - exp0.analysis.set_options(p0={"tau": 30}) - - exp1 = T1([1], delays) - exp1.analysis.set_options(p0={"tau": 1000000}) - - par_exp = ParallelExperiment([exp0, exp1], flatten_results=False) - res = par_exp.run(backend=backend, seed_simulator=4) - self.assertExperimentDone(res) - - sub_res = [] - for i in range(2): - sub_res.append(res.child_data(i).analysis_results("T1")) - - self.assertEqual(sub_res[0].quality, "good") - self.assertAlmostEqual(sub_res[0].value.n, t1[0], delta=3) - self.assertEqual(sub_res[1].quality, "bad") - def test_t1_analysis(self): """ Test T1Analysis @@ -342,7 +311,7 @@ def test_experiment_config(self): def test_roundtrip_serializable(self): """Test round trip JSON serialization""" - exp = T1([0], [1, 2, 3, 4, 5]) + exp = T1([0], [1, 2]) self.assertRoundTripSerializable(exp) def test_circuit_roundtrip_serializable(self): diff --git a/test/library/characterization/test_t2hahn.py b/test/library/characterization/test_t2hahn.py index 3656689143..8e3d124e48 100644 --- a/test/library/characterization/test_t2hahn.py +++ b/test/library/characterization/test_t2hahn.py @@ -179,7 +179,7 @@ def test_experiment_config(self): def test_roundtrip_serializable(self): """Test round trip JSON serialization""" - delays0 = list(range(1, 60, 2)) + delays0 = list(range(1, 60, 20)) exp = T2Hahn([0], delays0) self.assertRoundTripSerializable(exp) @@ -205,7 +205,7 @@ def test_roundtrip_serializable(self): def test_circuit_roundtrip_serializable(self): """Test round trip JSON serialization""" - delays0 = list(range(1, 60, 2)) + delays0 = list(range(1, 60, 20)) # backend is needed for serialization of the delays in the metadata of the experiment. 
backend = FakeVigoV2() exp = T2Hahn([0], delays0, backend=backend) diff --git a/test/library/characterization/test_t2ramsey.py b/test/library/characterization/test_t2ramsey.py index e5a90e8a36..599c9a547a 100644 --- a/test/library/characterization/test_t2ramsey.py +++ b/test/library/characterization/test_t2ramsey.py @@ -222,13 +222,13 @@ def test_experiment_config(self): def test_roundtrip_serializable(self): """Test round trip JSON serialization""" - exp = T2Ramsey([0], [1, 2, 3, 4, 5]) + exp = T2Ramsey([0], [1, 2]) self.assertRoundTripSerializable(exp) def test_circuit_roundtrip_serializable(self): """Test round trip JSON serialization""" backend = FakeVigoV2() - exp = T2Ramsey([0], [1, 2, 3, 4, 5], backend=backend) + exp = T2Ramsey([0], [1, 2], backend=backend) self.assertRoundTripSerializable(exp._transpiled_circuits()) def test_analysis_config(self): diff --git a/test/library/quantum_volume/test_qv.py b/test/library/quantum_volume/test_qv.py index b303b742c2..eca6c4ce03 100644 --- a/test/library/quantum_volume/test_qv.py +++ b/test/library/quantum_volume/test_qv.py @@ -275,6 +275,6 @@ def test_roundtrip_serializable(self): def test_circuit_roundtrip_serializable(self): """Test expdata round trip JSON serialization""" - num_of_qubits = 4 + num_of_qubits = 3 qv_exp = QuantumVolume(range(num_of_qubits), seed=SEED) self.assertRoundTripSerializable(qv_exp._transpiled_circuits()) diff --git a/test/library/randomized_benchmarking/test_clifford_utils.py b/test/library/randomized_benchmarking/test_clifford_utils.py index 000ed3c36c..ddc11c9b94 100644 --- a/test/library/randomized_benchmarking/test_clifford_utils.py +++ b/test/library/randomized_benchmarking/test_clifford_utils.py @@ -20,6 +20,7 @@ from numpy.random import default_rng from qiskit import QuantumCircuit +from qiskit.exceptions import QiskitError from qiskit.circuit.library import ( IGate, XGate, @@ -43,6 +44,7 @@ _num_from_layer_indices, _layer_indices_from_num, _CLIFFORD_LAYER, + _CLIFFORD_INVERSE_2Q, ) @@ -195,3 +197,27 @@ def test_num_from_layer(self): circ.compose(_CLIFFORD_LAYER[layer][idx], inplace=True) layered = Clifford(circ) self.assertEqual(standard, layered) + + def test_num_from_2q_circuit(self): + """Check conversion of circuits to integers with num_from_2q_circuit""" + qc = QuantumCircuit(2) + qc.h(0) + num = num_from_2q_circuit(qc) + self.assertEqual(num, 5760) + qc = QuantumCircuit(2) + qc.u(0, 0, np.pi, 0) + with self.assertRaises(QiskitError): + # raising an error is ok, num_from_2q_circuit does not support all 2-qubit gates + num_from_2q_circuit(qc) + + # regression test for using the dense multiplication table + qc = QuantumCircuit(2) + qc.cz(1, 0) + num = num_from_2q_circuit(qc) + self.assertEqual(num, 368) + + def test_clifford_inverse_table(self): + """Check correctness of the Clifford inversion table""" + for lhs, rhs in enumerate(_CLIFFORD_INVERSE_2Q): + c = compose_2q(lhs, rhs) + self.assertEqual(c, 0) diff --git a/test/library/randomized_benchmarking/test_interleaved_rb.py b/test/library/randomized_benchmarking/test_interleaved_rb.py index a7151495bf..6ed28d0f53 100644 --- a/test/library/randomized_benchmarking/test_interleaved_rb.py +++ b/test/library/randomized_benchmarking/test_interleaved_rb.py @@ -72,14 +72,14 @@ def test_experiment_config(self): def test_roundtrip_serializable(self): """Test round trip JSON serialization""" exp = rb.InterleavedRB( - interleaved_element=SXGate(), physical_qubits=(0,), lengths=[10, 20, 30], seed=123 + interleaved_element=SXGate(), physical_qubits=(0,), lengths=[1, 
3], seed=123 ) self.assertRoundTripSerializable(exp) def test_circuit_roundtrip_serializable(self): """Test circuits round trip JSON serialization""" exp = rb.InterleavedRB( - interleaved_element=SXGate(), physical_qubits=(0,), lengths=[10, 20, 30], seed=123 + interleaved_element=SXGate(), physical_qubits=(0,), lengths=[1, 3], seed=123 ) self.assertRoundTripSerializable(exp._transpiled_circuits()) @@ -276,6 +276,7 @@ def test_interleaving_cnot_gate_with_non_supported_direction(self): lengths=[3], num_samples=4, backend=my_backend, + seed=1234, ) transpiled = exp._transpiled_circuits() for qc in transpiled: diff --git a/test/library/randomized_benchmarking/test_rb_analysis.py b/test/library/randomized_benchmarking/test_rb_analysis.py index e9ab6ffe5a..b71005f186 100644 --- a/test/library/randomized_benchmarking/test_rb_analysis.py +++ b/test/library/randomized_benchmarking/test_rb_analysis.py @@ -32,20 +32,21 @@ class TestEPGAnalysis(QiskitExperimentsTestCase): by comparing the value with the depolarizing probability. """ - def setUp(self): - """Setup the tests.""" - super().setUp() + @classmethod + def setUpClass(cls): + """Run experiments without analysis for test data preparation.""" + super().setUpClass() # Setup noise model, including more gate for complicated EPG computation # Note that 1Q channel error is amplified to check 1q channel correction mechanism - self.p_x = 0.04 - self.p_h = 0.02 - self.p_s = 0.0 - self.p_cx = 0.09 - x_error = depolarizing_error(self.p_x, 1) - h_error = depolarizing_error(self.p_h, 1) - s_error = depolarizing_error(self.p_s, 1) - cx_error = depolarizing_error(self.p_cx, 2) + cls.p_x = 0.04 + cls.p_h = 0.02 + cls.p_s = 0.0 + cls.p_cx = 0.09 + x_error = depolarizing_error(cls.p_x, 1) + h_error = depolarizing_error(cls.p_h, 1) + s_error = depolarizing_error(cls.p_s, 1) + cx_error = depolarizing_error(cls.p_cx, 2) noise_model = NoiseModel() noise_model.add_all_qubit_quantum_error(x_error, "x") @@ -70,8 +71,7 @@ def setUp(self): backend=backend, ) exp_1qrb_q0.set_transpile_options(**transpiler_options) - expdata_1qrb_q0 = exp_1qrb_q0.run(analysis=None) - self.assertExperimentDone(expdata_1qrb_q0, timeout=300) + expdata_1qrb_q0 = exp_1qrb_q0.run(analysis=None).block_for_results() exp_1qrb_q1 = rb.StandardRB( physical_qubits=(1,), @@ -80,8 +80,7 @@ def setUp(self): backend=backend, ) exp_1qrb_q1.set_transpile_options(**transpiler_options) - expdata_1qrb_q1 = exp_1qrb_q1.run(analysis=None) - self.assertExperimentDone(expdata_1qrb_q1, timeout=300) + expdata_1qrb_q1 = exp_1qrb_q1.run(analysis=None).block_for_results() exp_2qrb = rb.StandardRB( physical_qubits=(0, 1), @@ -90,12 +89,18 @@ def setUp(self): backend=backend, ) exp_2qrb.set_transpile_options(**transpiler_options) - expdata_2qrb = exp_2qrb.run(analysis=None) - self.assertExperimentDone(expdata_2qrb, timeout=300) + expdata_2qrb = exp_2qrb.run(analysis=None).block_for_results() + + cls.expdata_1qrb_q0 = expdata_1qrb_q0 + cls.expdata_1qrb_q1 = expdata_1qrb_q1 + cls.expdata_2qrb = expdata_2qrb - self.expdata_1qrb_q0 = expdata_1qrb_q0 - self.expdata_1qrb_q1 = expdata_1qrb_q1 - self.expdata_2qrb = expdata_2qrb + def setUp(self): + """Setup the tests.""" + super().setUp() + self.assertExperimentDone(self.expdata_1qrb_q0) + self.assertExperimentDone(self.expdata_1qrb_q1) + self.assertExperimentDone(self.expdata_2qrb) def test_default_epg_ratio(self): """Calculate EPG with default ratio dictionary. 
diff --git a/test/library/randomized_benchmarking/test_standard_rb.py b/test/library/randomized_benchmarking/test_standard_rb.py
index 7990cbaa09..71710292d0 100644
--- a/test/library/randomized_benchmarking/test_standard_rb.py
+++ b/test/library/randomized_benchmarking/test_standard_rb.py
@@ -57,12 +57,12 @@ def test_experiment_config(self):

     def test_roundtrip_serializable(self):
         """Test round trip JSON serialization"""
-        exp = rb.StandardRB(physical_qubits=(0,), lengths=[10, 20, 30], seed=123)
+        exp = rb.StandardRB(physical_qubits=(0,), lengths=[1, 3], seed=123)
         self.assertRoundTripSerializable(exp)

     def test_circuit_roundtrip_serializable(self):
         """Test circuits round trip JSON serialization"""
-        exp = rb.StandardRB(physical_qubits=(0,), lengths=[10, 20, 30], seed=123)
+        exp = rb.StandardRB(physical_qubits=(0,), lengths=[1, 3], seed=123)
         self.assertRoundTripSerializable(exp._transpiled_circuits())

     def test_analysis_config(self):
@@ -319,11 +319,29 @@ def test_three_qubit(self):

     def test_add_more_circuit_yields_lower_variance(self):
         """Test variance reduction with larger number of sampling."""
+
+        # Increase single qubit error so that we can see gate error with a
+        # small number of Cliffords since we want to run many samples without
+        # taking too long.
+        p1q = 0.15
+        pvz = 0.0
+
+        # setup noise model
+        sx_error = depolarizing_error(p1q, 1)
+        rz_error = depolarizing_error(pvz, 1)
+
+        noise_model = NoiseModel()
+        noise_model.add_all_qubit_quantum_error(sx_error, "sx")
+        noise_model.add_all_qubit_quantum_error(rz_error, "rz")
+
+        # Aer simulator
+        backend = AerSimulator(noise_model=noise_model, seed_simulator=123)
+
         exp1 = rb.StandardRB(
-            physical_qubits=(0, 1),
+            physical_qubits=(0,),
             lengths=list(range(1, 30, 3)),
             seed=123,
-            backend=self.backend,
+            backend=backend,
             num_samples=3,
         )
         exp1.analysis.set_options(gate_error_ratio=None)
@@ -332,11 +350,11 @@ def test_add_more_circuit_yields_lower_variance(self):
         self.assertExperimentDone(expdata1)

         exp2 = rb.StandardRB(
-            physical_qubits=(0, 1),
+            physical_qubits=(0,),
             lengths=list(range(1, 30, 3)),
             seed=456,
-            backend=self.backend,
-            num_samples=5,
+            backend=backend,
+            num_samples=30,
         )
         exp2.analysis.set_options(gate_error_ratio=None)
         exp2.set_transpile_options(**self.transpiler_options)
diff --git a/test/library/tomography/test_process_tomography.py b/test/library/tomography/test_process_tomography.py
index 93fdee3857..17bed174a5 100644
--- a/test/library/tomography/test_process_tomography.py
+++ b/test/library/tomography/test_process_tomography.py
@@ -103,7 +103,7 @@ def test_circuit_roundtrip_serializable(self):
         circ.s(0)
         circ.cx(0, 1)

-        exp = ProcessTomography(circ)
+        exp = ProcessTomography(circ, preparation_indices=[0], measurement_indices=[0])
         self.assertRoundTripSerializable(exp._transpiled_circuits())

     def test_cvxpy_gaussian_lstsq_cx(self):
@@ -568,7 +568,7 @@ def test_qpt_conditional_circuit(self, circuit_clbits):
                 fid = qi.process_fidelity(state.value, targets[idx], require_tp=False)
                 self.assertGreater(
                     fid,
-                    0.95,
+                    0.935,
                     msg=f"{fitter} fidelity {fid} is low for conditional outcome {idx}",
                 )
@@ -599,7 +599,7 @@ def test_qpt_conditional_meas(self):
                 prob = state.extra["conditional_probability"]
                 prob_target = 0.5
                 self.assertTrue(
-                    np.isclose(prob, prob_target, atol=1e-2),
+                    np.isclose(prob, prob_target, atol=2e-2),
                     msg=(
                         f"fitter {fitter} probability incorrect for conditional"
                         f" measurement {idx} {outcome} ({prob} != {prob_target})"
                     ),
                 )
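Note: restricting ProcessTomography to preparation_indices=[0] and measurement_indices=[0] shrinks the serialization test because tomography enumerates every basis combination per qubit. Assuming the usual Pauli bases with 4 preparation states and 3 measurement bases per qubit (treat these constants as an assumption of this sketch, not a statement of the library's defaults), the circuit count drops from 144 to 12:

    # Rough circuit-count arithmetic for basis-enumeration tomography.
    def qpt_circuit_count(n_prep_qubits, n_meas_qubits, n_prep=4, n_meas=3):
        return (n_prep ** n_prep_qubits) * (n_meas ** n_meas_qubits)

    assert qpt_circuit_count(2, 2) == 144  # full two-qubit QPT
    assert qpt_circuit_count(1, 1) == 12   # one qubit each, as above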
diff --git a/test/test_base.py b/test/test_base.py
new file mode 100644
index 0000000000..cf5c010c96
--- /dev/null
+++ b/test/test_base.py
@@ -0,0 +1,27 @@
+# This code is part of Qiskit.
+#
+# (C) Copyright IBM 2021.
+#
+# This code is licensed under the Apache License, Version 2.0. You may
+# obtain a copy of this license in the LICENSE.txt file in the root directory
+# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# Any modifications or derivative works of this code must retain this
+# copyright notice, and modified files need to carry a notice indicating
+# that they have been altered from the originals.
+"""
+Tests for qiskit-experiments base test module
+"""
+
+from test.base import create_base_test_case
+
+
+UnittestBase = create_base_test_case(use_testtools=False)
+
+
+class TestQiskitExperimentsTestCaseWithUnittest(UnittestBase):
+    """Test QiskitExperimentsTestCase behavior when not based on testtools.TestCase"""
+
+    def test_test(self):
+        """Test that a test not based on ``testtools`` can run"""
+        pass
diff --git a/test/visualization/test_plotter_mpldrawer.py b/test/visualization/test_plotter_mpldrawer.py
index b39f9fcd4d..9e795b3265 100644
--- a/test/visualization/test_plotter_mpldrawer.py
+++ b/test/visualization/test_plotter_mpldrawer.py
@@ -32,6 +32,11 @@ class TestPlotterAndMplDrawer(QiskitExperimentsTestCase):
     """Test generic plotter with Matplotlib drawer."""

+    def tearDown(self):
+        """Clean up test case state"""
+        plt.close("all")
+        super().tearDown()
+
     def test_end_to_end_short(self):
         """Test whether plotter with MplDrawer returns a figure."""
         plotter = MockPlotter(MplDrawer())
@@ -118,7 +123,7 @@ def test_series_names_different_types(self, series_names: Dict[type, List[Any]])
         """
         # Create Matplotlib axes that use a PNG backend. The default backend, FigureCanvasSVG, does not
-        # have `tostring_rgb()` which is needed to compute the difference between two figures in this
+        # have `buffer_rgba()` which is needed to compute the difference between two figures in this
         # method. We need to set the axes as MplDrawer will use
         # `qiskit_experiments.framework.matplotlib.get_non_gui_ax` by default; which uses an SVG backend.
         plt.switch_backend("Agg")
@@ -161,16 +166,12 @@ def test_series_names_different_types(self, series_names: Dict[type, List[Any]])
         for plot_type in legend_plot_types:
             plotter.enable_legend_for(series_name, plot_type)

-        # Generate figure and save to buffers for comparison. This requires a pixel backend, like AGG, so
-        # that `tostring_rgb()` is available.
+        # Generate figure and save to buffers for comparison.
         figure_data = {}
         for plotter_type, plotter in plotters.items():
             figure = plotter.figure().figure
             figure.canvas.draw()
-            figure_data[plotter_type] = np.frombuffer(
-                figure.canvas.tostring_rgb(),
-                dtype=np.uint8,
-            ).reshape(figure.canvas.get_width_height() + (3,))
+            figure_data[plotter_type] = np.asarray(figure.canvas.buffer_rgba(), dtype=np.uint8)

         # Compare root-mean-squared error between two images.
         for (fig1_type, fig1), (fig2_type, fig2) in combinations(figure_data.items(), 2):
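Note: the switch from tostring_rgb() to buffer_rgba() tracks Matplotlib's API: tostring_rgb() on the Agg canvas is deprecated in newer Matplotlib releases, while buffer_rgba() returns a memoryview that NumPy can view directly as a (height, width, 4) array with no manual reshape. A minimal standalone sketch of the comparison idiom used above:

    import matplotlib
    matplotlib.use("Agg")  # pixel backend, so a raster buffer exists
    import matplotlib.pyplot as plt
    import numpy as np

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    fig.canvas.draw()  # render so the buffer is populated

    rgba = np.asarray(fig.canvas.buffer_rgba(), dtype=np.uint8)
    print(rgba.shape)  # (height, width, 4), e.g. (480, 640, 4)

    # Root-mean-squared difference; identical images give 0.0.
    rms = np.sqrt(np.mean((rgba.astype(float) - rgba.astype(float)) ** 2))
    plt.close(fig)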
diff --git a/tox.ini b/tox.ini
index a83c13922f..1392db4a83 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,6 +9,7 @@ install_command = pip install -c{toxinidir}/constraints.txt -U {opts} {packages}
 setenv =
   VIRTUAL_ENV={envdir}
   QISKIT_SUPPRESS_PACKAGING_WARNINGS=Y
+  QISKIT_TEST_CAPTURE_STREAMS=1
 deps = -r{toxinidir}/requirements-dev.txt
   -r{toxinidir}/requirements-extras.txt
@@ -17,6 +18,8 @@ passenv =
   QISKIT_PARALLEL
   RAYON_NUM_THREADS
   QISKIT_IBM_*
+  TEST_TIMEOUT
+  QE_USE_TESTTOOLS
 commands = stestr run {posargs}

 [testenv:cover]
@@ -29,14 +32,15 @@ commands =
   coverage3 combine
   coverage3 lcov

-[testenv:terra-main]
+[testenv:qiskit-main]
 usedevelop = True
 install_command = pip install -U {opts} {packages}
 setenv =
   VIRTUAL_ENV={envdir}
   QISKIT_SUPPRESS_PACKAGING_WARNINGS=Y
+  QISKIT_TEST_CAPTURE_STREAMS=1
 deps =
-  git+https://github.com/Qiskit/qiskit-terra
+  git+https://github.com/Qiskit/qiskit
   -r{toxinidir}/requirements-dev.txt
   -r{toxinidir}/requirements-extras.txt
 passenv =
@@ -44,15 +48,17 @@ passenv =
   QISKIT_PARALLEL
   RAYON_NUM_THREADS
   QISKIT_IBM_*
+  TEST_TIMEOUT
+  QE_USE_TESTTOOLS
 commands = stestr run {posargs}

 [testenv:lint]
 envdir = .tox/lint
 commands =
-    black --check {posargs} qiskit_experiments test tools setup.py
-    pylint -rn -j 0 --rcfile="{toxinidir}/.pylintrc" qiskit_experiments/ test/ tools/
-    python "{toxinidir}/tools/verify_headers.py"
+    black --check qiskit_experiments test tools setup.py
+    pylint -rn {posargs} --rcfile={toxinidir}/.pylintrc qiskit_experiments/ test/ tools/
+    python {toxinidir}/tools/verify_headers.py

 [testenv:lint-incr]
 envdir = .tox/lint
@@ -61,8 +67,8 @@ allowlist_externals = git
 commands =
     black --check {posargs} qiskit_experiments test tools setup.py
     git fetch -q https://github.com/Qiskit-Extensions/qiskit-experiments :lint_incr_latest
-    python "{toxinidir}/tools/pylint_incr.py" -rn -j4 -sn --paths :/qiskit_experiments/*.py :/test/*.py :/tools/*.py
-    python "{toxinidir}/tools/verify_headers.py" qiskit_experiments test tools
+    python {toxinidir}/tools/pylint_incr.py -rn {posargs} -sn --paths :/qiskit_experiments/*.py :/test/*.py :/tools/*.py
+    python {toxinidir}/tools/verify_headers.py qiskit_experiments test tools

 [testenv:black]
 envdir = .tox/lint
@@ -100,6 +106,20 @@ setenv =
 commands =
   sphinx-build -T -W --keep-going -b html {posargs} docs/ docs/_build/html

+[testenv:docs-qiskit-main]
+usedevelop = True
+passenv =
+    EXPERIMENTS_DEV_DOCS
+    PROD_BUILD
+    RELEASE_STRING
+    VERSION_STRING
+deps =
+    git+https://github.com/Qiskit/qiskit
+    -r{toxinidir}/requirements-dev.txt
+    -r{toxinidir}/requirements-extras.txt
+commands =
+    sphinx-build -j auto -T -W --keep-going -b html {posargs} docs/ docs/_build/html
+
 [testenv:docs-clean]
 skip_install = true
 deps =