From 5535bfe587b711bae45aeb6760b859563ea8c6d9 Mon Sep 17 00:00:00 2001 From: Josh Izaac Date: Thu, 2 Apr 2020 22:31:53 +1030 Subject: [PATCH] Black Strawberry Fields prior to pre-release, and add continuous integration for PEP8 formatting (#351) * Black Strawberry Fields prior to release * Add a CI to check formatting * blacking --- .github/workflows/format.yml | 13 + strawberryfields/_version.py | 2 +- strawberryfields/api/connection.py | 6 +- strawberryfields/apps/data.py | 6 +- strawberryfields/apps/points.py | 2 +- strawberryfields/apps/vibronic.py | 4 +- strawberryfields/backends/__init__.py | 6 +- strawberryfields/backends/base.py | 15 +- .../backends/fockbackend/backend.py | 51 ++- .../backends/fockbackend/circuit.py | 130 ++++-- strawberryfields/backends/fockbackend/ops.py | 75 +-- .../backends/gaussianbackend/backend.py | 62 +-- .../gaussianbackend/gaussiancircuit.py | 224 +++++---- .../backends/gaussianbackend/ops.py | 54 ++- .../backends/gaussianbackend/states.py | 33 +- strawberryfields/backends/shared_ops.py | 85 ++-- strawberryfields/backends/states.py | 243 +++++----- .../backends/tfbackend/__init__.py | 2 +- .../backends/tfbackend/backend.py | 110 +++-- .../backends/tfbackend/circuit.py | 299 ++++++++---- strawberryfields/backends/tfbackend/ops.py | 430 ++++++++++++++---- strawberryfields/backends/tfbackend/states.py | 195 +++++--- strawberryfields/circuitdrawer.py | 14 +- strawberryfields/circuitspecs/X8.py | 8 +- strawberryfields/circuitspecs/__init__.py | 13 +- .../circuitspecs/circuit_specs.py | 20 +- strawberryfields/circuitspecs/fock.py | 2 +- strawberryfields/circuitspecs/gaussian.py | 2 +- .../circuitspecs/gaussian_unitary.py | 25 +- strawberryfields/circuitspecs/gbs.py | 19 +- strawberryfields/circuitspecs/tensorflow.py | 2 +- strawberryfields/cli/__init__.py | 40 +- strawberryfields/decompositions.py | 99 ++-- strawberryfields/engine.py | 8 +- strawberryfields/io.py | 16 +- strawberryfields/ops.py | 257 ++++++----- strawberryfields/parameters.py | 39 +- strawberryfields/program.py | 47 +- strawberryfields/program_utils.py | 41 +- strawberryfields/utils.py | 1 - 40 files changed, 1727 insertions(+), 973 deletions(-) create mode 100644 .github/workflows/format.yml diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 000000000..5d0045a2a --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,13 @@ +name: Formatting check +on: +- pull_request + +jobs: + black: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Black Code Formatter + uses: lgeiger/black-action@v1.0.1 + with: + args: "-l 100 strawberryfields/ --check" diff --git a/strawberryfields/_version.py b/strawberryfields/_version.py index 7d9cc1b19..c119c3aff 100644 --- a/strawberryfields/_version.py +++ b/strawberryfields/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = '0.13.0.rc0' +__version__ = "0.13.0.rc0" diff --git a/strawberryfields/api/connection.py b/strawberryfields/api/connection.py index 6b62799ff..d49ad5e82 100644 --- a/strawberryfields/api/connection.py +++ b/strawberryfields/api/connection.py @@ -152,7 +152,7 @@ def create_job(self, target: str, program: Program, run_options: dict = None) -> circuit = bb.serialize() path = "/jobs" - response = requests.post(self._url(path), headers=self._headers, json={"circuit": circuit},) + response = requests.post(self._url(path), headers=self._headers, json={"circuit": circuit}) if response.status_code == 201: if self._verbose: log.info("The 
job was successfully submitted.") @@ -220,7 +220,7 @@ def get_job_result(self, job_id: str) -> Result: """ path = "/jobs/{}/result".format(job_id) response = requests.get( - self._url(path), headers={"Accept": "application/x-numpy", **self._headers}, + self._url(path), headers={"Accept": "application/x-numpy", **self._headers} ) if response.status_code == 200: # Read the numpy binary data in the payload into memory @@ -241,7 +241,7 @@ def cancel_job(self, job_id: str): """ path = "/jobs/{}".format(job_id) response = requests.patch( - self._url(path), headers=self._headers, json={"status": JobStatus.CANCELLED.value}, + self._url(path), headers=self._headers, json={"status": JobStatus.CANCELLED.value} ) if response.status_code == 204: if self._verbose: diff --git a/strawberryfields/apps/data.py b/strawberryfields/apps/data.py index babb13a68..1cdc9c3a5 100644 --- a/strawberryfields/apps/data.py +++ b/strawberryfields/apps/data.py @@ -339,9 +339,9 @@ def __init__(self): self.w = scipy.sparse.load_npz(DATA_PATH + self._data_filename + "_w.npz").toarray()[0] self.wp = scipy.sparse.load_npz(DATA_PATH + self._data_filename + "_wp.npz").toarray()[0] self.Ud = scipy.sparse.load_npz(DATA_PATH + self._data_filename + "_Ud.npz").toarray() - self.delta = scipy.sparse.load_npz(DATA_PATH + self._data_filename + "_delta.npz").toarray( - - )[0] + self.delta = scipy.sparse.load_npz( + DATA_PATH + self._data_filename + "_delta.npz" + ).toarray()[0] # pylint: disable=missing-docstring @property diff --git a/strawberryfields/apps/points.py b/strawberryfields/apps/points.py index 7a9dec3e5..e93e82dfa 100644 --- a/strawberryfields/apps/points.py +++ b/strawberryfields/apps/points.py @@ -80,7 +80,7 @@ def rbf_kernel(R: np.ndarray, sigma: float) -> np.ndarray: Returns: K (array): the RBF kernel matrix """ - return np.exp(-(scipy.spatial.distance.cdist(R, R)) ** 2 / 2 / sigma ** 2) + return np.exp(-((scipy.spatial.distance.cdist(R, R)) ** 2) / 2 / sigma ** 2) def sample(K: np.ndarray, n_mean: float, n_samples: int) -> list: diff --git a/strawberryfields/apps/vibronic.py b/strawberryfields/apps/vibronic.py index f6fc54a1a..46015f236 100644 --- a/strawberryfields/apps/vibronic.py +++ b/strawberryfields/apps/vibronic.py @@ -129,6 +129,6 @@ def energies(samples: list, w: np.ndarray, wp: np.ndarray) -> Union[list, float] a single sample energy if only one sample is input """ if not isinstance(samples[0], list): - return np.dot(samples[:len(samples)//2], wp) - np.dot(samples[len(samples)//2:], w) + return np.dot(samples[: len(samples) // 2], wp) - np.dot(samples[len(samples) // 2 :], w) - return [np.dot(s[:len(s)//2], wp) - np.dot(s[len(s)//2:], w) for s in samples] + return [np.dot(s[: len(s) // 2], wp) - np.dot(s[len(s) // 2 :], w) for s in samples] diff --git a/strawberryfields/backends/__init__.py b/strawberryfields/backends/__init__.py index 933a48195..a0d8c5b73 100644 --- a/strawberryfields/backends/__init__.py +++ b/strawberryfields/backends/__init__.py @@ -92,15 +92,13 @@ "TFBackend", "BaseState", "BaseFockState", - "BaseGaussianState" + "BaseGaussianState", ] virtual_backends = ["X8_01"] -local_backends = { - b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend) -} +local_backends = {b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend)} def load_backend(name): diff --git a/strawberryfields/backends/base.py b/strawberryfields/backends/base.py index fe0a956c2..28dda852f 100644 --- a/strawberryfields/backends/base.py +++ b/strawberryfields/backends/base.py @@ -28,6 +28,7 @@ class 
ModeMap: """ Simple internal class for maintaining a map of existing modes. """ + def __init__(self, num_subsystems): self._init = num_subsystems #: list[int]: _map[k] is the internal index used by the backend for @@ -117,7 +118,7 @@ class BaseBackend: """Abstract base class for backends.""" #: str: short name of the backend - short_name = 'base' + short_name = "base" #: str, None: Short name of the CircuitSpecs class used to validate Programs for this backend. None if no validation is required. circuit_spec = None @@ -449,9 +450,10 @@ def state(self, modes=None, **kwargs): raise NotImplementedError -#============================= +# ============================= # Fock-basis backends -#============================= +# ============================= + class BaseFock(BaseBackend): """Abstract base class for backends capable of Fock state manipulation.""" @@ -519,7 +521,6 @@ def prepare_dm_state(self, state, modes): """ raise NotImplementedError - def cubic_phase(self, gamma, mode): r"""Apply the cubic phase operation to the specified mode. @@ -584,9 +585,11 @@ def state(self, modes=None, **kwargs): """ raise NotImplementedError -#============================== + +# ============================== # Gaussian-formulation backends -#============================== +# ============================== + class BaseGaussian(BaseBackend): """Abstract base class for backends that are only capable of Gaussian state manipulation.""" diff --git a/strawberryfields/backends/fockbackend/backend.py b/strawberryfields/backends/fockbackend/backend.py index bbb0d5c89..be1209b6f 100644 --- a/strawberryfields/backends/fockbackend/backend.py +++ b/strawberryfields/backends/fockbackend/backend.py @@ -64,16 +64,18 @@ class FockBackend(BaseFock): ~ops """ - short_name = 'fock' - circuit_spec = 'fock' + short_name = "fock" + circuit_spec = "fock" def __init__(self): """Instantiate a FockBackend object.""" super().__init__() self._supported["mixed_states"] = True self._init_modes = None #: int: initial number of modes in the circuit - self._modemap = None #: Modemap: maps external mode indices to internal ones - self.circuit = None #: ~.fockbackend.circuit.Circuit: representation of the simulated quantum state + self._modemap = None #: Modemap: maps external mode indices to internal ones + self.circuit = ( + None #: ~.fockbackend.circuit.Circuit: representation of the simulated quantum state + ) def _remap_modes(self, modes): if isinstance(modes, int): @@ -84,7 +86,7 @@ def _remap_modes(self, modes): map_ = self._modemap.show() submap = [map_[m] for m in modes] if not self._modemap.valid(modes) or None in submap: - raise ValueError('The specified modes are not valid.') + raise ValueError("The specified modes are not valid.") remapped_modes = self._modemap.remap(modes) if was_int: @@ -110,8 +112,8 @@ def begin_circuit(self, num_subsystems, **kwargs): For each mode, the simulator can represent the Fock states :math:`\ket{0}, \ket{1}, \ldots, \ket{\text{cutoff_dim}-1}`. pure (bool): If True (default), use a pure state representation (otherwise will use a mixed state representation). 
""" - cutoff_dim = kwargs.get('cutoff_dim', None) - pure = kwargs.get('pure', True) + cutoff_dim = kwargs.get("cutoff_dim", None) + pure = kwargs.get("pure", True) if cutoff_dim is None: raise ValueError("Argument 'cutoff_dim' must be passed to the Fock backend") if not isinstance(cutoff_dim, int): @@ -140,7 +142,7 @@ def get_modes(self): return [i for i, j in enumerate(self._modemap._map) if j is not None] def reset(self, pure=True, **kwargs): - cutoff = kwargs.get('cutoff_dim', self.circuit._trunc) + cutoff = kwargs.get("cutoff_dim", self.circuit._trunc) self._modemap.reset() self.circuit.reset(pure, num_subsystems=self._init_modes, cutoff_dim=cutoff) @@ -169,12 +171,16 @@ def squeeze(self, z, mode): self.circuit.squeeze(abs(z), phase(z), self._remap_modes(mode)) def two_mode_squeeze(self, z, mode1, mode2): - self.circuit.two_mode_squeeze(abs(z), phase(z), self._remap_modes(mode1), self._remap_modes(mode2)) + self.circuit.two_mode_squeeze( + abs(z), phase(z), self._remap_modes(mode1), self._remap_modes(mode2) + ) def beamsplitter(self, t, r, mode1, mode2): if isinstance(t, complex): raise ValueError("Beamsplitter transmittivity t must be a float.") - self.circuit.beamsplitter(t, abs(r), phase(r), self._remap_modes(mode1), self._remap_modes(mode2)) + self.circuit.beamsplitter( + t, abs(r), phase(r), self._remap_modes(mode1), self._remap_modes(mode2) + ) def measure_homodyne(self, phi, mode, shots=1, select=None, **kwargs): """Perform a homodyne measurement on the specified mode. @@ -187,8 +193,9 @@ def measure_homodyne(self, phi, mode, shots=1, select=None, **kwargs): max (float): The pdf is discretized onto the 1D grid [-max,max] (default: 10). """ if shots != 1: - raise NotImplementedError("fock backend currently does not support " - "shots != 1 for homodyne measurement") + raise NotImplementedError( + "fock backend currently does not support " "shots != 1 for homodyne measurement" + ) return self.circuit.measure_homodyne(phi, self._remap_modes(mode), select=select, **kwargs) def loss(self, T, mode): @@ -200,7 +207,6 @@ def is_vacuum(self, tol=0.0, **kwargs): def get_cutoff_dim(self): return self.circuit._trunc - def state(self, modes=None, **kwargs): s, pure = self.circuit.get_state() @@ -216,7 +222,7 @@ def state(self, modes=None, **kwargs): left_str = [indices[i] for i in range(0, 2 * num_modes, 2)] right_str = [indices[i] for i in range(1, 2 * num_modes, 2)] out_str = [indices[: 2 * num_modes]] - einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str) + einstr = "".join(left_str + [","] + right_str + ["->"] + out_str) rho = np.einsum(einstr, s, s.conj()) else: rho = s @@ -230,7 +236,9 @@ def state(self, modes=None, **kwargs): num_modes = len(rho.shape) // 2 if len(modes) > num_modes: - raise ValueError("The number of specified modes cannot be larger than the number of subsystems.") + raise ValueError( + "The number of specified modes cannot be larger than the number of subsystems." 
+ ) keep_indices = indices[: 2 * len(modes)] trace_indices = indices[2 * len(modes) : len(modes) + num_modes] @@ -242,13 +250,13 @@ def state(self, modes=None, **kwargs): ind.insert(m, keep_indices[2 * ctr : 2 * (ctr + 1)]) ctr += 1 - indStr = ''.join(ind) + '->' + keep_indices + indStr = "".join(ind) + "->" + keep_indices red_state = np.einsum(indStr, rho) # permute indices of returned state to reflect the ordering of modes (we know and hence can assume that red_state is a mixed state) if modes != sorted(modes): mode_permutation = np.argsort(modes) - index_permutation = [2*x+i for x in mode_permutation for i in (0, 1)] + index_permutation = [2 * x + i for x in mode_permutation for i in (0, 1)] red_state = np.transpose(red_state, np.argsort(index_permutation)) cutoff = self.circuit._trunc @@ -276,10 +284,13 @@ def kerr_interaction(self, kappa, mode): self.circuit.kerr_interaction(kappa, self._remap_modes(mode)) def cross_kerr_interaction(self, kappa, mode1, mode2): - self.circuit.cross_kerr_interaction(kappa, self._remap_modes(mode1), self._remap_modes(mode2)) + self.circuit.cross_kerr_interaction( + kappa, self._remap_modes(mode1), self._remap_modes(mode2) + ) def measure_fock(self, modes, shots=1, select=None, **kwargs): if shots != 1: - raise NotImplementedError("fock backend currently does not support " - "shots != 1 for Fock measurement") + raise NotImplementedError( + "fock backend currently does not support " "shots != 1 for Fock measurement" + ) return self.circuit.measure_fock(self._remap_modes(modes), select=select) diff --git a/strawberryfields/backends/fockbackend/circuit.py b/strawberryfields/backends/fockbackend/circuit.py index 3529e2258..f8233e20b 100644 --- a/strawberryfields/backends/fockbackend/circuit.py +++ b/strawberryfields/backends/fockbackend/circuit.py @@ -32,7 +32,8 @@ indices = string.ascii_lowercase MAX_MODES = len(indices) - 3 -class Circuit(): + +class Circuit: """ Class implementing a basic simulator for a collection of modes in the fock basis. 
@@ -79,7 +80,9 @@ def _apply_channel(self, kraus_ops, modes): self._pure = False if len(kraus_ops) == 0: - self._state = np.zeros([self._trunc for i in range(self._num_modes*2)], dtype=ops.def_type) + self._state = np.zeros( + [self._trunc for i in range(self._num_modes * 2)], dtype=ops.def_type + ) else: states = [self.apply_gate_BLAS(k, modes, pure=False) for k in kraus_ops] self._state = sum(states) @@ -142,12 +145,12 @@ def apply_gate_BLAS(self, mat, modes, **kwargs): n = kwargs.get("n", self._num_modes) size = len(modes) - dim = self._trunc**size + dim = self._trunc ** size stshape = [self._trunc for i in range(size)] # Apply the following matrix transposition: # |m1> |m1>|m2>...|mn>|mode[1]>...|mode[n]> [i2, i3, j2, j3, i1, j1] # if the gate is applied to modes 2 and 3 (which are already switched to the first two modes) transpose_list = np.arange(2 * self._num_modes) - transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]] + transpose_list[[t1 + 1, t2]] = transpose_list[[t2, t1 + 1]] self._state = self._state.transpose(transpose_list) self._state = self._state.transpose(switch_list_1) @@ -322,8 +331,8 @@ def _apply_BS(mat, state, trunc): # pragma: no cover ret = np.zeros_like(state, dtype=np.complex128) for i in range(trunc): for j in range(trunc): - for k in range(max(1+i+j-trunc, 0), min(i+j, trunc-1) + 1): - ret[i, j] += mat[i, k, j, i+j-k] * state[k, i+j-k] + for k in range(max(1 + i + j - trunc, 0), min(i + j, trunc - 1) + 1): + ret[i, j] += mat[i, k, j, i + j - k] * state[k, i + j - k] return ret # ignored in covtest (doesn't work well with the jit decorator) @@ -352,8 +361,8 @@ def _apply_S2(mat, state, trunc): # pragma: no cover ret = np.zeros_like(state, dtype=np.complex128) for i in range(trunc): for j in range(trunc): - for k in range(max(i-j, 0), trunc + min(i-j, 0)): - ret[i, k] += mat[i, j, k, k + j-i] * state[j, k + j-i] + for k in range(max(i - j, 0), trunc + min(i - j, 0)): + ret[i, k] += mat[i, j, k, k + j - i] * state[j, k + j - i] return ret def norm(self): @@ -406,16 +415,19 @@ def prepare_multimode(self, state, modes): modes = [modes] n_modes = len(modes) - pure_shape = tuple([self._trunc]*n_modes) - mixed_shape = tuple([self._trunc]*(2*n_modes)) - pure_shape_as_vector = tuple([self._trunc**n_modes]) - mixed_shape_as_matrix = tuple([self._trunc**n_modes]*2) + pure_shape = tuple([self._trunc] * n_modes) + mixed_shape = tuple([self._trunc] * (2 * n_modes)) + pure_shape_as_vector = tuple([self._trunc ** n_modes]) + mixed_shape_as_matrix = tuple([self._trunc ** n_modes] * 2) # Do consistency checks if self._checks: - if state.shape != pure_shape and state.shape != mixed_shape \ - and \ - state.shape != pure_shape_as_vector and state.shape != mixed_shape_as_matrix: + if ( + state.shape != pure_shape + and state.shape != mixed_shape + and state.shape != pure_shape_as_vector + and state.shape != mixed_shape_as_matrix + ): raise ValueError("Incorrect shape for state preparation") if len(modes) != len(set(modes)): raise ValueError("The specified modes cannot appear multiple times.") @@ -446,14 +458,16 @@ def prepare_multimode(self, state, modes): self._state = np.tensordot(reduced_state, state, axes=0) # unless the preparation was meant to go into the last modes in the standard order, we need to swap indices around - if modes != list(range(self._num_modes-len(modes), self._num_modes)): + if modes != list(range(self._num_modes - len(modes), self._num_modes)): mode_permutation = [x for x in range(self._num_modes) if x not in modes] + modes if self._pure: scale = 1 
index_permutation = mode_permutation else: scale = 2 - index_permutation = [scale*x+i for x in mode_permutation for i in (0, 1)] #two indices per mode if we have pure states + index_permutation = [ + scale * x + i for x in mode_permutation for i in (0, 1) + ] # two indices per mode if we have pure states index_permutation = np.argsort(index_permutation) self._state = np.transpose(self._state, index_permutation) @@ -595,7 +609,7 @@ def is_vacuum(self, tol): fid = np.abs(self._state.flat[0]) ** 2 else: fid = self._state.flat[0] - return np.abs(fid-1) <= tol + return np.abs(fid - 1) <= tol def get_state(self): """ @@ -628,8 +642,10 @@ def measure_fock(self, modes, select=None): # make sure modes and select are the same length if len(select) != len(modes): - raise ValueError("When performing post-selection, the number of " - "selected values (including None) must match the number of measured modes") + raise ValueError( + "When performing post-selection, the number of " + "selected values (including None) must match the number of measured modes" + ) # make sure the select values are all integers or nones if not all(isinstance(s, int) or s is None for s in select): @@ -643,7 +659,9 @@ def measure_fock(self, modes, select=None): select_values = [s for s in select if s is not None] # project out postselected modes - self._state = ops.project_reset(selected, select_values, self._state, self._pure, self._num_modes, self._trunc) + self._state = ops.project_reset( + selected, select_values, self._state, self._pure, self._num_modes, self._trunc + ) if self.norm() == 0: raise ZeroDivisionError("Measurement has zero probability.") @@ -661,7 +679,7 @@ def measure_fock(self, modes, select=None): reduced = ops.partial_trace(state, self._num_modes, unmeasured) dist = np.ravel(ops.diagonal(reduced, len(measure)).real) # kill spurious tiny values (which are sometimes negative) - dist = dist * ~np.isclose(dist, 0.) + dist = dist * ~np.isclose(dist, 0.0) # Make a random choice if sum(dist) != 1: @@ -679,7 +697,9 @@ def measure_fock(self, modes, select=None): outcome[permutation[i]] = permuted_outcome[i] # Project the state onto the measurement outcome & reset in vacuum - self._state = ops.project_reset(measure, outcome, self._state, self._pure, self._num_modes, self._trunc) + self._state = ops.project_reset( + measure, outcome, self._state, self._pure, self._num_modes, self._trunc + ) if self.norm() == 0: raise ZeroDivisionError("Measurement has zero probability.") @@ -696,7 +716,7 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): """ Performs a homodyne measurement on a mode. """ - m_omega_over_hbar = 1/self._hbar + m_omega_over_hbar = 1 / self._hbar # Make sure the state is mixed for reduced density matrix if self._pure: @@ -711,27 +731,31 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): else: raise TypeError("Selected measurement result must be of numeric type.") else: - # Compute reduced density matrix + # Compute reduced density matrix unmeasured = [i for i in range(self._num_modes) if not i == mode] reduced = ops.partial_trace(state, self._num_modes, unmeasured) # Rotate to measurement basis - reduced = self.apply_gate_BLAS(ops.phase(-phi, self._trunc), [0], state=reduced, pure=False, n=1) + reduced = self.apply_gate_BLAS( + ops.phase(-phi, self._trunc), [0], state=reduced, pure=False, n=1 + ) # Create pdf. 
Same as tf implementation, but using # the recursive relation H_0(x) = 1, H_1(x) = 2x, H_{n+1}(x) = 2xH_n(x) - 2nH_{n-1}(x) - q_mag = kwargs.get('max', 10) - num_bins = kwargs.get('num_bins', 100000) + q_mag = kwargs.get("max", 10) + num_bins = kwargs.get("num_bins", 100000) q_tensor, Hvals = ops.hermiteVals(q_mag, num_bins, m_omega_over_hbar, self._trunc) H_matrix = np.zeros((self._trunc, self._trunc, num_bins)) for n, m in product(range(self._trunc), repeat=2): - H_matrix[n][m] = 1 / sqrt(2**n * bang(n) * 2**m * bang(m)) * Hvals[n] * Hvals[m] + H_matrix[n][m] = 1 / sqrt(2 ** n * bang(n) * 2 ** m * bang(m)) * Hvals[n] * Hvals[m] H_terms = np.expand_dims(reduced, -1) * np.expand_dims(H_matrix, 0) - rho_dist = np.sum(H_terms, axis=(1, 2)) \ - * (m_omega_over_hbar/pi)**0.5 \ - * np.exp(-m_omega_over_hbar * q_tensor**2) \ - * (q_tensor[1] - q_tensor[0]) # Delta_q for normalization (only works if the bins are equally spaced) + rho_dist = ( + np.sum(H_terms, axis=(1, 2)) + * (m_omega_over_hbar / pi) ** 0.5 + * np.exp(-m_omega_over_hbar * q_tensor ** 2) + * (q_tensor[1] - q_tensor[0]) + ) # Delta_q for normalization (only works if the bins are equally spaced) # Sample from rho_dist. This is a bit different from tensorflow due to how # numpy treats multinomial sampling. In particular, numpy returns a @@ -744,15 +768,21 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): homodyne_sample = q_tensor[sample_idx] # Project remaining modes into the conditional state - inf_squeezed_vac = \ - np.array([(-0.5)**(n//2) * sqrt(bang(n)) / bang(n//2) if n%2 == 0 else 0.0 + 0.0j \ - for n in range(self._trunc)], dtype=ops.def_type) + inf_squeezed_vac = np.array( + [ + (-0.5) ** (n // 2) * sqrt(bang(n)) / bang(n // 2) if n % 2 == 0 else 0.0 + 0.0j + for n in range(self._trunc) + ], + dtype=ops.def_type, + ) alpha = homodyne_sample * sqrt(m_omega_over_hbar / 2) composed = np.dot(ops.phase(phi, self._trunc), ops.displacement(alpha, self._trunc)) eigenstate = self.apply_gate_BLAS(composed, [0], state=inf_squeezed_vac, pure=True, n=1) - vac_state = np.array([1.0 + 0.0j if i == 0 else 0.0 + 0.0j for i in range(self._trunc)], dtype=ops.def_type) + vac_state = np.array( + [1.0 + 0.0j if i == 0 else 0.0 + 0.0j for i in range(self._trunc)], dtype=ops.def_type + ) projector = np.outer(vac_state, eigenstate.conj()) self._state = self.apply_gate_BLAS(projector, [mode]) diff --git a/strawberryfields/backends/fockbackend/ops.py b/strawberryfields/backends/fockbackend/ops.py index 5df5b0d75..0421fa811 100644 --- a/strawberryfields/backends/fockbackend/ops.py +++ b/strawberryfields/backends/fockbackend/ops.py @@ -21,7 +21,7 @@ from itertools import product import numpy as np -from numpy import (sqrt, sinh, cosh, tanh, array, exp) +from numpy import sqrt, sinh, cosh, tanh, array, exp from numpy.polynomial.hermite import hermval as H from scipy.special import factorial as fac @@ -79,14 +79,14 @@ def indexRange(lst, trunc): for vals in product(*([range(trunc) for x in lst if x is None])): gen = genOfTuple(vals) - yield [next(gen) if v is None else v for v in lst] #pylint: disable=stop-iteration-return + yield [next(gen) if v is None else v for v in lst] # pylint: disable=stop-iteration-return def index(lst, trunc): """ Converts an n-ary index to a 1-dimensional index. 
""" - return sum([lst[i] * trunc**(len(lst)-i-1) for i in range(len(lst))]) + return sum([lst[i] * trunc ** (len(lst) - i - 1) for i in range(len(lst))]) def unIndex(i, n, trunc): @@ -94,7 +94,7 @@ def unIndex(i, n, trunc): Converts a 1-dimensional index ``i`` with truncation ``trunc`` and number of modes ``n`` to a n-ary index. """ - return [i // trunc**(n - 1 - m) % trunc for m in range(n)] + return [i // trunc ** (n - 1 - m) % trunc for m in range(n)] def sliceExp(axes, ind, n): @@ -108,7 +108,7 @@ def abssqr(z): r""" Given :math:`z` returns :math:`|z|^2`. """ - return z.real**2 + z.imag**2 + return z.real ** 2 + z.imag ** 2 def dagger(mat): @@ -124,10 +124,10 @@ def mix(state, n): shape of the input state. """ - left_str = [indices[i] for i in range(0, 2*n, 2)] - right_str = [indices[i] for i in range(1, 2*n, 2)] - out_str = [indices[:2*n]] - einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str) + left_str = [indices[i] for i in range(0, 2 * n, 2)] + right_str = [indices[i] for i in range(1, 2 * n, 2)] + out_str = [indices[: 2 * n]] + einstr = "".join(left_str + [","] + right_str + ["->"] + out_str) return np.einsum(einstr, state, state.conj()) @@ -138,7 +138,7 @@ def diagonal(state, n): left_str = [indices[i] + indices[i] for i in range(n)] out_str = [indices[:n]] - einstr = ''.join(left_str + ['->'] + out_str) + einstr = "".join(left_str + ["->"] + out_str) return np.einsum(einstr, state) @@ -148,7 +148,7 @@ def trace(state, n): """ left_str = [indices[i] + indices[i] for i in range(n)] - einstr = ''.join(left_str) + einstr = "".join(left_str) return np.einsum(einstr, state) @@ -158,9 +158,12 @@ def partial_trace(state, n, modes): Expects state to be in mixed state form. """ - left_str = [indices[2*i] + indices[2*i] if i in modes else indices[2*i:2*i+2] for i in range(n)] - out_str = ['' if i in modes else indices[2*i:2*i+2] for i in range(n)] - einstr = ''.join(left_str + ['->'] + out_str) + left_str = [ + indices[2 * i] + indices[2 * i] if i in modes else indices[2 * i : 2 * i + 2] + for i in range(n) + ] + out_str = ["" if i in modes else indices[2 * i : 2 * i + 2] for i in range(n)] + einstr = "".join(left_str + ["->"] + out_str) return np.einsum(einstr, state) @@ -179,7 +182,7 @@ def tensor(u, v, n, pure, pos=None): else: scale = 2 for i in range(v.ndim): - w = np.rollaxis(w, scale*n + i, scale*pos + i) + w = np.rollaxis(w, scale * n + i, scale * pos + i) return w @@ -194,13 +197,13 @@ def project_reset(modes, x, state, pure, n, trunc): def intersperse(lst): # pylint: disable=missing-docstring - return tuple([lst[i//2] for i in range(len(lst)*2)]) + return tuple([lst[i // 2] for i in range(len(lst) * 2)]) if pure: ret = np.zeros([trunc for i in range(n)], dtype=def_type) ret[outSlice] = state[inSlice] else: - ret = np.zeros([trunc for i in range(n*2)], dtype=def_type) + ret = np.zeros([trunc for i in range(n * 2)], dtype=def_type) ret[intersperse(outSlice)] = state[intersperse(inSlice)] return ret @@ -220,7 +223,7 @@ def a(trunc): """ ret = np.zeros((trunc, trunc), dtype=def_type) for i in range(1, trunc): - ret[i-1][i] = sqrt(i) + ret[i - 1][i] = sqrt(i) return ret @@ -287,7 +290,7 @@ def kerr(kappa, trunc): The Kerr interaction :math:`K(\kappa)`. 
""" n = np.arange(trunc) - ret = np.diag(np.exp(1j*kappa*n**2)) + ret = np.diag(np.exp(1j * kappa * n ** 2)) return ret @@ -298,8 +301,8 @@ def cross_kerr(kappa, trunc): """ n1 = np.arange(trunc)[None, :] n2 = np.arange(trunc)[:, None] - n1n2 = np.ravel(n1*n2) - ret = np.diag(np.exp(1j*kappa*n1n2)).reshape([trunc]*4).swapaxes(1, 2) + n1n2 = np.ravel(n1 * n2) + ret = np.diag(np.exp(1j * kappa * n1n2)).reshape([trunc] * 4).swapaxes(1, 2) return ret @@ -309,9 +312,9 @@ def cubicPhase(gamma, hbar, trunc): The cubic phase gate :math:`\exp{(i\frac{\gamma}{3\hbar}\hat{x}^3)}`. """ a_ = a(trunc) - x = (a_ + np.conj(a_).T) * np.sqrt(hbar/2) + x = (a_ + np.conj(a_).T) * np.sqrt(hbar / 2) x3 = x @ x @ x - ret = matrixExp(1j * gamma / (3*hbar) * x3) + ret = matrixExp(1j * gamma / (3 * hbar) * x3) return ret @@ -321,7 +324,7 @@ def phase(theta, trunc): r""" The phase gate :math:`R(\theta)` """ - return np.array(np.diag([exp(1j*n*theta) for n in range(trunc)]), dtype=def_type) + return np.array(np.diag([exp(1j * n * theta) for n in range(trunc)]), dtype=def_type) @functools.lru_cache() @@ -338,7 +341,7 @@ def beamsplitter(t, r, phi, trunc): BS_tw, _, _ = BSgate(theta, phi, cutoff=trunc) # TODO: Transpose needed because of different conventions in SF and The Walrus. Remove when The Walrus is updated. - return BS_tw.transpose((0,2,1,3)) + return BS_tw.transpose((0, 2, 1, 3)) @functools.lru_cache() @@ -350,6 +353,7 @@ def proj(i, j, trunc): P[j][i] = 1.0 + 0.0j return P + # ============================================ # # State vectors @@ -372,7 +376,7 @@ def vacuumStateMixed(n, trunc): The `n`-mode mixed vacuum state :math:`\ket{00\dots 0}\bra{00\dots 0}` """ - state = np.zeros([trunc for i in range(n*2)], dtype=def_type) + state = np.zeros([trunc for i in range(n * 2)], dtype=def_type) state.ravel()[0] = 1.0 + 0.0j return state @@ -390,9 +394,11 @@ def coherentState(alpha, trunc): r""" The coherent state :math:`D(\alpha)\ket{0}`. """ + def entry(n): """coherent summation term""" - return alpha**n / sqrt(fac(n)) + return alpha ** n / sqrt(fac(n)) + return exp(-abssqr(alpha) / 2) * array([entry(n) for n in range(trunc)]) @@ -401,12 +407,13 @@ def squeezedState(r, theta, trunc): r""" The squeezed state :math:`S(re^{i\theta})`. """ + def entry(n): """squeezed summation term""" - return (sqrt(fac(2*n))/(2**n*fac(n))) * (-exp(1j*theta)*tanh(r))**n + return (sqrt(fac(2 * n)) / (2 ** n * fac(n))) * (-exp(1j * theta) * tanh(r)) ** n - vec = array([entry(n//2) if n % 2 == 0 else 0.0 + 0.0j for n in range(trunc)]) - return sqrt(1/cosh(r)) * vec + vec = array([entry(n // 2) if n % 2 == 0 else 0.0 + 0.0j for n in range(trunc)]) + return sqrt(1 / cosh(r)) * vec @functools.lru_cache() @@ -466,7 +473,7 @@ def lossChannel(T, trunc): The Kraus operators for the loss channel :math:`\mathcal{N}(T)`. 
""" - TToAdaggerA = np.array(np.diag([T**(i/2) for i in range(trunc)]), dtype=def_type) + TToAdaggerA = np.array(np.diag([T ** (i / 2) for i in range(trunc)]), dtype=def_type) def aToN(n): """the nth matrix power of the annihilation operator matrix a""" @@ -474,7 +481,7 @@ def aToN(n): def E(n): """the loss channel amplitudes in the Fock basis""" - return ((1-T)/T)**(n/2) * np.dot(aToN(n)/sqrt(fac(n)), TToAdaggerA) + return ((1 - T) / T) ** (n / 2) * np.dot(aToN(n) / sqrt(fac(n)), TToAdaggerA) if T == 0: return [proj(i, 0, trunc) for i in range(trunc)] @@ -500,8 +507,8 @@ def hermiteVals(q_mag, num_bins, m_omega_over_hbar, trunc): Hvals = [None] * trunc Hvals[0] = 1 - Hvals[1] = 2*x + Hvals[1] = 2 * x for i in range(2, trunc): - Hvals[i] = 2*x*Hvals[i-1] - 2*(i-1)*Hvals[i-2] + Hvals[i] = 2 * x * Hvals[i - 1] - 2 * (i - 1) * Hvals[i - 2] return q_tensor, Hvals diff --git a/strawberryfields/backends/gaussianbackend/backend.py b/strawberryfields/backends/gaussianbackend/backend.py index cad6f9735..af08c25cf 100644 --- a/strawberryfields/backends/gaussianbackend/backend.py +++ b/strawberryfields/backends/gaussianbackend/backend.py @@ -148,11 +148,14 @@ def measure_homodyne(self, phi, mode, shots=1, select=None, **kwargs): """ if shots != 1: if select is not None: - raise NotImplementedError("Gaussian backend currently does not support " - "postselection if shots != 1 for homodyne measurement") + raise NotImplementedError( + "Gaussian backend currently does not support " + "postselection if shots != 1 for homodyne measurement" + ) - raise NotImplementedError("Gaussian backend currently does not support " - "shots != 1 for homodyne measurement") + raise NotImplementedError( + "Gaussian backend currently does not support " "shots != 1 for homodyne measurement" + ) # phi is the rotation of the measurement operator, hence the minus self.circuit.phase_shift(-phi, mode) @@ -169,11 +172,15 @@ def measure_heterodyne(self, mode, shots=1, select=None): if shots != 1: if select is not None: - raise NotImplementedError("Gaussian backend currently does not support " - "postselection if shots != 1 for heterodyne measurement") - - raise NotImplementedError("Gaussian backend currently does not support " - "shots != 1 for heterodyne measurement") + raise NotImplementedError( + "Gaussian backend currently does not support " + "postselection if shots != 1 for heterodyne measurement" + ) + + raise NotImplementedError( + "Gaussian backend currently does not support " + "shots != 1 for heterodyne measurement" + ) if select is None: m = identity(2) @@ -192,9 +199,7 @@ def prepare_gaussian_state(self, r, V, modes): # make sure number of modes matches shape of r and V N = len(modes) if len(r) != 2 * N: - raise ValueError( - "Length of means vector must be twice the number of modes." - ) + raise ValueError("Length of means vector must be twice the number of modes.") if V.shape != (2 * N, 2 * N): raise ValueError( "Shape of covariance matrix must be [2N, 2N], where N is the number of modes." @@ -220,10 +225,13 @@ def thermal_loss(self, T, nbar, mode): def measure_fock(self, modes, shots=1, select=None): if shots != 1: if select is not None: - raise NotImplementedError("Gaussian backend currently does not support " - "postselection") - warnings.warn("Cannot simulate non-Gaussian states. " - "Conditional state after Fock measurement has not been updated.") + raise NotImplementedError( + "Gaussian backend currently does not support " "postselection" + ) + warnings.warn( + "Cannot simulate non-Gaussian states. 
" + "Conditional state after Fock measurement has not been updated." + ) mu = self.circuit.mean mean = self.circuit.smeanxp() @@ -245,21 +253,24 @@ def measure_fock(self, modes, shots=1, select=None): samples = samples.reshape((len(modes),)) return samples - def measure_threshold(self, modes, shots=1, select=None): if shots != 1: if select is not None: - raise NotImplementedError("Gaussian backend currently does not support " - "postselection") - warnings.warn("Cannot simulate non-Gaussian states. " - "Conditional state after Threshold measurement has not been updated.") + raise NotImplementedError( + "Gaussian backend currently does not support " "postselection" + ) + warnings.warn( + "Cannot simulate non-Gaussian states. " + "Conditional state after Threshold measurement has not been updated." + ) mu = self.circuit.mean cov = self.circuit.scovmatxp() # check we are sampling from a gaussian state with zero mean if not allclose(mu, zeros_like(mu)): - raise NotImplementedError("Threshold measurement is only supported for " - "Gaussian states with zero mean") + raise NotImplementedError( + "Threshold measurement is only supported for " "Gaussian states with zero mean" + ) x_idxs = array(modes) p_idxs = x_idxs + len(mu) modes_idxs = concatenate([x_idxs, p_idxs]) @@ -271,7 +282,6 @@ def measure_threshold(self, modes, shots=1, select=None): samples = samples.reshape((len(modes),)) return samples - def state(self, modes=None, **kwargs): """Returns the state of the quantum simulation. @@ -317,6 +327,4 @@ def state(self, modes=None, **kwargs): else: Amat = self.circuit.Amat() - return GaussianState( - (means, covmat), len(modes), qmat, Amat, mode_names=mode_names - ) + return GaussianState((means, covmat), len(modes), qmat, Amat, mode_names=mode_names) diff --git a/strawberryfields/backends/gaussianbackend/gaussiancircuit.py b/strawberryfields/backends/gaussianbackend/gaussiancircuit.py index 73be8a469..3c9829934 100644 --- a/strawberryfields/backends/gaussianbackend/gaussiancircuit.py +++ b/strawberryfields/backends/gaussianbackend/gaussiancircuit.py @@ -24,6 +24,7 @@ class GaussianModes: covariance matrix and a mean vector. 
The modes are initialized in the (multimode) vacuum state, The state of the modes is manipulated by calling the various methods.""" + # pylint: disable=too-many-public-methods def __init__(self, num_subsystems): @@ -54,7 +55,7 @@ def __init__(self, num_subsystems): def add_mode(self, n=1): """add mode to the circuit""" - newnlen = self.nlen+n + newnlen = self.nlen + n newnmat = np.zeros((newnlen, newnlen), dtype=complex) newmmat = np.zeros((newnlen, newnlen), dtype=complex) newmean = np.zeros(newnlen, dtype=complex) @@ -108,7 +109,7 @@ def get_modes(self): def displace(self, beta, i): """ Implements a displacement operation by the complex number beta in mode i""" - #Update displacement of mode i by the complex amount bet + # Update displacement of mode i by the complex amount bet if self.active[i] is None: raise ValueError("Cannot displace mode, mode does not exist") @@ -119,28 +120,36 @@ def squeeze(self, r, phi, k): if self.active[k] is None: raise ValueError("Cannot squeeze mode, mode does not exist") - phase = np.exp(1j*phi) - phase2 = phase*phase + phase = np.exp(1j * phi) + phase2 = phase * phase sh = np.sinh(r) ch = np.cosh(r) - sh2 = sh*sh - ch2 = ch*ch - shch = sh*ch + sh2 = sh * sh + ch2 = ch * ch + shch = sh * ch nk = np.copy(self.nmat[k]) mk = np.copy(self.mmat[k]) alphak = np.copy(self.mean[k]) # Update displacement of mode k - self.mean[k] = alphak*ch-phase*np.conj(alphak)*sh + self.mean[k] = alphak * ch - phase * np.conj(alphak) * sh # Update covariance matrix elements. Only the k column and row of nmat and mmat need to be updated. # First update the diagonal elements - self.nmat[k, k] = sh2 - phase*shch*np.conj(mk[k]) - shch*np.conj(phase)*mk[k] + ch2*nk[k] + sh2*nk[k] - self.mmat[k, k] = -(phase*shch) + phase2*sh2*np.conj(mk[k]) + ch2*mk[k] - 2*phase*shch*nk[k] + self.nmat[k, k] = ( + sh2 + - phase * shch * np.conj(mk[k]) + - shch * np.conj(phase) * mk[k] + + ch2 * nk[k] + + sh2 * nk[k] + ) + self.mmat[k, k] = ( + -(phase * shch) + phase2 * sh2 * np.conj(mk[k]) + ch2 * mk[k] - 2 * phase * shch * nk[k] + ) # Update the column k for l in np.delete(np.arange(self.nlen), k): - self.nmat[k, l] = -(sh*np.conj(phase)*mk[l]) + ch*nk[l] - self.mmat[k, l] = ch*mk[l] - phase*sh*nk[l] + self.nmat[k, l] = -(sh * np.conj(phase) * mk[l]) + ch * nk[l] + self.mmat[k, l] = ch * mk[l] - phase * sh * nk[l] # Update row k self.nmat[:, k] = np.conj(self.nmat[k]) @@ -151,19 +160,19 @@ def phase_shift(self, phi, k): if self.active[k] is None: raise ValueError("Cannot phase shift mode, mode does not exist") - phase = np.exp(1j*phi) - phase2 = phase*phase + phase = np.exp(1j * phi) + phase2 = phase * phase # Update displacement of mode k - self.mean[k] = self.mean[k]*phase + self.mean[k] = self.mean[k] * phase # Update covariance matrix elements. Only the k column and row of nmat and mmat need to be updated. 
# First update the diagonal elements - self.mmat[k][k] = phase2*self.mmat[k][k] + self.mmat[k][k] = phase2 * self.mmat[k][k] # Update the column k for l in np.delete(np.arange(self.nlen), k): - self.nmat[k][l] = np.conj(phase)*self.nmat[k][l] - self.mmat[k][l] = phase*self.mmat[k][l] + self.nmat[k][l] = np.conj(phase) * self.nmat[k][l] + self.mmat[k][l] = phase * self.mmat[k][l] # Update row k self.nmat[:, k] = np.conj(self.nmat[k]) @@ -177,13 +186,13 @@ def beamsplitter(self, theta, phi, k, l): if k == l: raise ValueError("Cannot use the same mode for beamsplitter inputs") - phase = np.exp(1j*phi) - phase2 = phase*phase + phase = np.exp(1j * phi) + phase2 = phase * phase sh = np.sin(theta) ch = np.cos(theta) - sh2 = sh*sh - ch2 = ch*ch - shch = sh*ch + sh2 = sh * sh + ch2 = ch * ch + shch = sh * ch # alpha1 = self.mean[0] nk = np.copy(self.nmat[k]) @@ -193,26 +202,39 @@ def beamsplitter(self, theta, phi, k, l): # Update displacement of mode k and l alphak = np.copy(self.mean[k]) alphal = np.copy(self.mean[l]) - self.mean[k] = ch*alphak+phase*sh*alphal - self.mean[l] = ch*alphal-np.conj(phase)*sh*alphak + self.mean[k] = ch * alphak + phase * sh * alphal + self.mean[l] = ch * alphal - np.conj(phase) * sh * alphak # Update covariance matrix elements. Only the k and l columns and rows of nmat and mmat need to be updated. # First update the (k,k), (k,l), (l,l), and (l,l) elements - self.nmat[k][k] = ch2*nk[k] + phase*shch*nk[l] + shch*np.conj(phase)*nl[k] + sh2*nl[l] - self.nmat[k][l] = -(shch*np.conj(phase)*nk[k]) + ch2*nk[l] - sh2*np.conj(phase2)*nl[k] + shch*np.conj(phase)*nl[l] + self.nmat[k][k] = ( + ch2 * nk[k] + phase * shch * nk[l] + shch * np.conj(phase) * nl[k] + sh2 * nl[l] + ) + self.nmat[k][l] = ( + -(shch * np.conj(phase) * nk[k]) + + ch2 * nk[l] + - sh2 * np.conj(phase2) * nl[k] + + shch * np.conj(phase) * nl[l] + ) self.nmat[l][k] = np.conj(self.nmat[k][l]) - self.nmat[l][l] = sh2*nk[k] - phase*shch*nk[l] - shch*np.conj(phase)*nl[k] + ch2*nl[l] - - self.mmat[k][k] = ch2*mk[k] + 2*phase*shch*ml[k] + phase2*sh2*ml[l] - self.mmat[k][l] = -(shch*np.conj(phase)*mk[k]) + ch2*ml[k] - sh2*ml[k] + phase*shch*ml[l] + self.nmat[l][l] = ( + sh2 * nk[k] - phase * shch * nk[l] - shch * np.conj(phase) * nl[k] + ch2 * nl[l] + ) + + self.mmat[k][k] = ch2 * mk[k] + 2 * phase * shch * ml[k] + phase2 * sh2 * ml[l] + self.mmat[k][l] = ( + -(shch * np.conj(phase) * mk[k]) + ch2 * ml[k] - sh2 * ml[k] + phase * shch * ml[l] + ) self.mmat[l][k] = self.mmat[k][l] - self.mmat[l][l] = sh2*np.conj(phase2)*mk[k] - 2*shch*np.conj(phase)*ml[k] + ch2*ml[l] + self.mmat[l][l] = ( + sh2 * np.conj(phase2) * mk[k] - 2 * shch * np.conj(phase) * ml[k] + ch2 * ml[l] + ) # Update columns k and l for i in np.delete(np.arange(self.nlen), (k, l)): - self.nmat[k][i] = ch*nk[i] + sh*np.conj(phase)*nl[i] - self.mmat[k][i] = ch*mk[i] + phase*sh*ml[i] - self.nmat[l][i] = -(phase*sh*nk[i]) + ch*nl[i] - self.mmat[l][i] = -(sh*np.conj(phase)*mk[i]) + ch*ml[i] + self.nmat[k][i] = ch * nk[i] + sh * np.conj(phase) * nl[i] + self.mmat[k][i] = ch * mk[i] + phase * sh * ml[i] + self.nmat[l][i] = -(phase * sh * nk[i]) + ch * nl[i] + self.mmat[l][i] = -(sh * np.conj(phase) * mk[i]) + ch * ml[i] # Update rows k and l self.nmat[:, k] = np.conj(self.nmat[k]) @@ -230,11 +252,33 @@ def scovmatxp(self): Said permutation matrix is implemented in the function changebasis(n) where n is the number of modes. 
""" - mm11 = self.nmat+np.transpose(self.nmat)+self.mmat+np.conj(self.mmat)+np.identity(self.nlen) - mm12 = 1j*(-np.transpose(self.mmat)+np.transpose(np.conj(self.mmat))+np.transpose(self.nmat)-self.nmat) - mm22 = self.nmat+np.transpose(self.nmat)-self.mmat-np.conj(self.mmat)+np.identity(self.nlen) - return np.concatenate((np.concatenate((mm11, mm12), axis=1), - np.concatenate((np.transpose(mm12), mm22), axis=1)), axis=0).real + mm11 = ( + self.nmat + + np.transpose(self.nmat) + + self.mmat + + np.conj(self.mmat) + + np.identity(self.nlen) + ) + mm12 = 1j * ( + -np.transpose(self.mmat) + + np.transpose(np.conj(self.mmat)) + + np.transpose(self.nmat) + - self.nmat + ) + mm22 = ( + self.nmat + + np.transpose(self.nmat) + - self.mmat + - np.conj(self.mmat) + + np.identity(self.nlen) + ) + return np.concatenate( + ( + np.concatenate((mm11, mm12), axis=1), + np.concatenate((np.transpose(mm12), mm22), axis=1), + ), + axis=0, + ).real def smeanxp(self): r"""Constructs and returns the symmetric ordered vector of mean in the xp ordering. @@ -247,9 +291,9 @@ def smeanxp(self): the number of modes. """ nmodes = self.nlen - r = np.empty(2*nmodes) - r[0:nmodes] = 2*self.mean.real - r[nmodes:2*nmodes] = 2*self.mean.imag + r = np.empty(2 * nmodes) + r[0:nmodes] = 2 * self.mean.real + r[nmodes : 2 * nmodes] = 2 * self.mean.imag return r def scovmat(self): @@ -260,10 +304,10 @@ def scovmat(self): def smean(self): r"""the symmetric mean $[q_1,p_1,q_2,p_2,...,q_n,p_n]$""" - r = np.empty(2*self.nlen) + r = np.empty(2 * self.nlen) for i in range(self.nlen): - r[2*i] = 2*self.mean[i].real - r[2*i+1] = 2*self.mean[i].imag + r[2 * i] = 2 * self.mean[i].real + r[2 * i + 1] = 2 * self.mean[i].imag return r def fromsmean(self, r, modes=None): @@ -278,7 +322,7 @@ def fromsmean(self, r, modes=None): mode_list = range(self.nlen) for idx, mode in enumerate(mode_list): - self.mean[mode] = 0.5*(r[2*idx]+1j*r[2*idx+1]) + self.mean[mode] = 0.5 * (r[2 * idx] + 1j * r[2 * idx + 1]) def fromscovmat(self, V, modes=None): r"""Updates the circuit's state when a standard covariance matrix is provided. @@ -288,11 +332,13 @@ def fromscovmat(self, V, modes=None): modes (Sequence): sequence of modes corresponding to the covariance matrix """ if modes is None: - n = len(V)//2 + n = len(V) // 2 modes = np.arange(self.nlen) if n != self.nlen: - raise ValueError("Covariance matrix is the incorrect size, does not match means vector.") + raise ValueError( + "Covariance matrix is the incorrect size, does not match means vector." 
+ ) else: n = len(modes) modes = np.array(modes) @@ -304,8 +350,8 @@ def fromscovmat(self, V, modes=None): VV = np.dot(np.dot(np.transpose(rotmat), V), rotmat) A = VV[0:n, 0:n] - B = VV[0:n, n:2*n] - C = VV[n:2*n, n:2*n] + B = VV[0:n, n : 2 * n] + C = VV[n : 2 * n, n : 2 * n] Bt = np.transpose(B) if n < self.nlen: @@ -315,8 +361,8 @@ def fromscovmat(self, V, modes=None): rows = modes.reshape(-1, 1) cols = modes.reshape(1, -1) - self.nmat[rows, cols] = 0.25*(A+C+1j*(B-Bt)-2*np.identity(n)) - self.mmat[rows, cols] = 0.25*(A-C+1j*(B+Bt)) + self.nmat[rows, cols] = 0.25 * (A + C + 1j * (B - Bt) - 2 * np.identity(n)) + self.mmat[rows, cols] = 0.25 * (A - C + 1j * (B + Bt)) def qmat(self, modes=None): """ Construct the covariance matrix for the Q function""" @@ -326,9 +372,17 @@ def qmat(self, modes=None): rows = np.reshape(modes, [-1, 1]) cols = np.reshape(modes, [1, -1]) - sigmaq = np.concatenate((np.concatenate((self.nmat[rows, cols], np.conjugate(self.mmat[rows, cols])), axis=1), - np.concatenate((self.mmat[rows, cols], np.conjugate(self.nmat[rows, cols])), axis=1)), - axis=0)+np.identity(2*len(modes)) + sigmaq = np.concatenate( + ( + np.concatenate( + (self.nmat[rows, cols], np.conjugate(self.mmat[rows, cols])), axis=1 + ), + np.concatenate( + (self.mmat[rows, cols], np.conjugate(self.nmat[rows, cols])), axis=1 + ), + ), + axis=0, + ) + np.identity(2 * len(modes)) return sigmaq def fidelity_coherent(self, alpha, modes=None): @@ -338,10 +392,12 @@ def fidelity_coherent(self, alpha, modes=None): Q = self.qmat(modes) Qi = np.linalg.inv(Q) - delta = self.mean[modes]-alpha + delta = self.mean[modes] - alpha delta = np.concatenate((delta, np.conjugate(delta))) - return np.sqrt(np.linalg.det(Qi).real)*np.exp(-0.5*np.dot(delta, np.dot(Qi, np.conjugate(delta))).real) + return np.sqrt(np.linalg.det(Qi).real) * np.exp( + -0.5 * np.dot(delta, np.dot(Qi, np.conjugate(delta))).real + ) def fidelity_vacuum(self, modes=None): """fidelity of the current state with the vacuum state""" @@ -354,8 +410,14 @@ def fidelity_vacuum(self, modes=None): def Amat(self): """ Constructs the A matrix from Hamilton's paper""" ######### this needs to be conjugated - sigmaq = np.concatenate((np.concatenate((np.transpose(self.nmat), self.mmat), axis=1), np.concatenate((np.transpose(np.conjugate(self.mmat)), self.nmat), axis=1)), axis=0)+np.identity(2*self.nlen) - return np.dot(ops.xmat(self.nlen), np.identity(2*self.nlen)-np.linalg.inv(sigmaq)) + sigmaq = np.concatenate( + ( + np.concatenate((np.transpose(self.nmat), self.mmat), axis=1), + np.concatenate((np.transpose(np.conjugate(self.mmat)), self.nmat), axis=1), + ), + axis=0, + ) + np.identity(2 * self.nlen) + return np.dot(ops.xmat(self.nlen), np.identity(2 * self.nlen) - np.linalg.inv(sigmaq)) def loss(self, T, k): r"""Implements a loss channel in mode k by amplitude loss amount \sqrt{T} @@ -364,15 +426,15 @@ def loss(self, T, k): raise ValueError("Cannot apply loss channel, mode does not exist") sqrtT = np.sqrt(T) - self.nmat[k] = sqrtT*self.nmat[k] - self.mmat[k] = sqrtT*self.mmat[k] + self.nmat[k] = sqrtT * self.nmat[k] + self.mmat[k] = sqrtT * self.mmat[k] - self.nmat[k][k] = sqrtT*self.nmat[k][k] - self.mmat[k][k] = sqrtT*self.mmat[k][k] + self.nmat[k][k] = sqrtT * self.nmat[k][k] + self.mmat[k][k] = sqrtT * self.mmat[k][k] self.nmat[:, k] = np.conj(self.nmat[k]) self.mmat[:, k] = self.mmat[k] - self.mean[k] = sqrtT*self.mean[k] + self.mean[k] = sqrtT * self.mean[k] def thermal_loss(self, T, nbar, k): r""" Implements the thermal loss channel in mode k by amplitude 
loss amount \sqrt{T} @@ -382,7 +444,7 @@ def thermal_loss(self, T, nbar, k): raise ValueError("Cannot apply loss channel, mode does not exist") self.loss(T, k) - self.nmat += (1-T)*nbar + self.nmat += (1 - T) * nbar def init_thermal(self, population, mode): """ Initializes a state of mode in a thermal state with the given population""" @@ -392,7 +454,7 @@ def init_thermal(self, population, mode): def is_vacuum(self, tol=0.0): """ Checks if the state is vacuum by calculating its fidelity with vacuum """ fid = self.fidelity_vacuum() - return np.abs(fid-1) <= tol + return np.abs(fid - 1) <= tol def measure_dyne(self, covmat, indices, shots=1): """ Performs the general-dyne measurement specified in covmat, the indices should correspond @@ -401,17 +463,17 @@ def measure_dyne(self, covmat, indices, shots=1): Quantum Continuous Variables: A Primer of Theoretical Methods by Alessio Serafini page 129 """ - if covmat.shape != (2*len(indices), 2*len(indices)): + if covmat.shape != (2 * len(indices), 2 * len(indices)): raise ValueError("Covariance matrix size does not match indices provided") for i in indices: if self.active[i] is None: raise ValueError("Cannot apply homodyne measurement, mode does not exist") - expind = np.concatenate((2*np.array(indices), 2*np.array(indices)+1)) + expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1)) mp = self.scovmat() (A, B, C) = ops.chop_in_blocks(mp, expind) - V = A-np.dot(np.dot(B, np.linalg.inv(C+covmat)), np.transpose(B)) + V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B)) V1 = ops.reassemble(V, expind) self.fromscovmat(V1) @@ -420,7 +482,7 @@ def measure_dyne(self, covmat, indices, shots=1): vm = np.random.multivariate_normal(vc, C, size=shots) # The next line is a hack in that it only updates conditioned on the first samples value # should still work if shots = 1 - va = va+np.dot(np.dot(B, np.linalg.inv(C+covmat)), vm[0]-vc) + va = va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm[0] - vc) va = ops.reassemble_vector(va, expind) self.fromsmean(va) return vm @@ -428,7 +490,7 @@ def measure_dyne(self, covmat, indices, shots=1): def homodyne(self, n, shots=1, eps=0.0002): """ Performs a homodyne measurement by calling measure dyne an giving it the covariance matrix of a squeezed state whose x quadrature has variance eps**2""" - covmat = np.diag(np.array([eps**2, 1./eps**2])) + covmat = np.diag(np.array([eps ** 2, 1.0 / eps ** 2])) res = self.measure_dyne(covmat, [n], shots=shots) return res @@ -437,12 +499,12 @@ def post_select_homodyne(self, n, val, eps=0.0002): """ Performs a homodyne measurement but postelecting on the value vals for mode n """ if self.active[n] is None: raise ValueError("Cannot apply homodyne measurement, mode does not exist") - covmat = np.diag(np.array([eps**2, 1./eps**2])) + covmat = np.diag(np.array([eps ** 2, 1.0 / eps ** 2])) indices = [n] - expind = np.concatenate((2*np.array(indices), 2*np.array(indices)+1)) + expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1)) mp = self.scovmat() (A, B, C) = ops.chop_in_blocks(mp, expind) - V = A-np.dot(np.dot(B, np.linalg.inv(C+covmat)), np.transpose(B)) + V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B)) V1 = ops.reassemble(V, expind) self.fromscovmat(V1) @@ -450,7 +512,7 @@ def post_select_homodyne(self, n, val, eps=0.0002): (va, vc) = ops.chop_in_blocks_vector(r, expind) vm1 = np.random.normal(vc[1], np.sqrt(C[1][1])) vm = np.array([val, vm1]) - va = va+np.dot(np.dot(B, np.linalg.inv(C+covmat)), vm-vc) + va 
= va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm - vc) va = ops.reassemble_vector(va, expind) self.fromsmean(va) return val @@ -462,17 +524,17 @@ def post_select_heterodyne(self, n, alpha_val): covmat = np.identity(2) indices = [n] - expind = np.concatenate((2*np.array(indices), 2*np.array(indices)+1)) + expind = np.concatenate((2 * np.array(indices), 2 * np.array(indices) + 1)) mp = self.scovmat() (A, B, C) = ops.chop_in_blocks(mp, expind) - V = A-np.dot(np.dot(B, np.linalg.inv(C+covmat)), np.transpose(B)) + V = A - np.dot(np.dot(B, np.linalg.inv(C + covmat)), np.transpose(B)) V1 = ops.reassemble(V, expind) self.fromscovmat(V1) r = self.smean() (va, vc) = ops.chop_in_blocks_vector(r, expind) - vm = 2.0*np.array([np.real(alpha_val), np.imag(alpha_val)]) - va = va+np.dot(np.dot(B, np.linalg.inv(C+covmat)), vm-vc) + vm = 2.0 * np.array([np.real(alpha_val), np.imag(alpha_val)]) + va = va + np.dot(np.dot(B, np.linalg.inv(C + covmat)), vm - vc) va = ops.reassemble_vector(va, expind) self.fromsmean(va) return alpha_val diff --git a/strawberryfields/backends/gaussianbackend/ops.py b/strawberryfields/backends/gaussianbackend/ops.py index da48fb3b1..b7b53b0a6 100644 --- a/strawberryfields/backends/gaussianbackend/ops.py +++ b/strawberryfields/backends/gaussianbackend/ops.py @@ -17,20 +17,25 @@ from scipy.linalg import sqrtm -from thewalrus.quantum import density_matrix_element, is_pure_cov, pure_state_amplitude, state_vector, density_matrix - +from thewalrus.quantum import ( + density_matrix_element, + is_pure_cov, + pure_state_amplitude, + state_vector, + density_matrix, +) def fock_amplitudes_one_mode(alpha, cov, cutoff): """ Returns the Fock space density matrix of gaussian state characterized by a complex displacement alpha and a (symmetric) covariance matrix The Fock ladder ladder goes from 0 to cutoff-1""" - r = 2*np.array([alpha.real, alpha.imag]) + r = 2 * np.array([alpha.real, alpha.imag]) if is_pure_cov(cov): psi = state_vector(r, cov, normalize=True, cutoff=cutoff) rho = np.outer(psi, psi.conj()) return rho - return density_matrix(r, cov, normalize=True, cutoff=cutoff) + return density_matrix(r, cov, normalize=True, cutoff=cutoff) def sm_fidelity(mu1, mu2, cov1, cov2, tol=1e-8): @@ -44,25 +49,27 @@ def sm_fidelity(mu1, mu2, cov1, cov2, tol=1e-8): by 2*1/4 but instead by 2*1/8=0.25 """ # pylint: disable=duplicate-code - v1 = 0.5*cov1 - v2 = 0.5*cov2 - deltar = mu1-mu2 + v1 = 0.5 * cov1 + v2 = 0.5 * cov2 + deltar = mu1 - mu2 n = 1 - W = omega(2*n) + W = omega(2 * n) - si12 = np.linalg.inv(v1+v2) - vaux = np.dot(np.dot(np.transpose(W), si12), 0.25*W+np.dot(v2, np.dot(W, v1))) + si12 = np.linalg.inv(v1 + v2) + vaux = np.dot(np.dot(np.transpose(W), si12), 0.25 * W + np.dot(v2, np.dot(W, v1))) p1 = np.dot(vaux, W) p1 = np.dot(p1, p1) - p1 = np.identity(2*n)+0.25*np.linalg.inv(p1) + p1 = np.identity(2 * n) + 0.25 * np.linalg.inv(p1) if np.linalg.norm(p1) < tol: p1 = np.zeros_like(p1) else: p1 = sqrtm(p1) - p1 = 2*(p1+np.identity(2*n)) + p1 = 2 * (p1 + np.identity(2 * n)) p1 = np.dot(p1, vaux).real - f = np.sqrt(np.linalg.det(si12)*np.linalg.det(p1))*np.exp(-0.25*np.dot(np.dot(deltar, si12), deltar).real) + f = np.sqrt(np.linalg.det(si12) * np.linalg.det(p1)) * np.exp( + -0.25 * np.dot(np.dot(deltar, si12), deltar).real + ) return f @@ -88,7 +95,7 @@ def chop_in_blocks_vector(v, idtodelete): Splits a vector into two vectors, where idtodelete specifies which elements go into vb """ - idtokeep = list(set(np.arange(len(v)))-set(idtodelete)) + idtokeep = list(set(np.arange(len(v))) - 
set(idtodelete)) va = v[idtokeep] vb = v[idtodelete] return (va, vb) @@ -100,8 +107,8 @@ def reassemble(A, idtodelete): dim(A)+len(idtodelete) The empty space are filled with zeros (offdiagonal) and ones (diagonals) """ - ntot = len(A)+len(idtodelete) - ind = set(np.arange(ntot))-set(idtodelete) + ntot = len(A) + len(idtodelete) + ind = set(np.arange(ntot)) - set(idtodelete) newmat = np.zeros((ntot, ntot)) for i, i1 in enumerate(ind): for j, j1 in enumerate(ind): @@ -116,8 +123,8 @@ def reassemble_vector(va, idtodelete): r"""Creates a vector with zeros indices idtodelete and everywhere else it puts the entries of va """ - ntot = len(va)+len(idtodelete) - ind = set(np.arange(ntot))-set(idtodelete) + ntot = len(va) + len(idtodelete) + ind = set(np.arange(ntot)) - set(idtodelete) newv = np.zeros(ntot) for j, j1 in enumerate(ind): newv[j1] = va[j] @@ -129,15 +136,16 @@ def omega(n): x = np.zeros(n) x[0::2] = 1 A = np.diag(x[0:-1], 1) - W = A-np.transpose(A) + W = A - np.transpose(A) return W def xmat(n): """ Returns the matrix ((0, I_n), (I, 0_n))""" idm = np.identity(n) - return np.concatenate((np.concatenate((0*idm, idm), axis=1), - np.concatenate((idm, 0*idm), axis=1)), axis=0).real + return np.concatenate( + (np.concatenate((0 * idm, idm), axis=1), np.concatenate((idm, 0 * idm), axis=1)), axis=0 + ).real def fock_prob(mu, cov, ocp): @@ -145,6 +153,6 @@ def fock_prob(mu, cov, ocp): Calculates the probability of measuring the gaussian state s2 in the photon number occupation pattern ocp""" if is_pure_cov(cov): - return np.abs(pure_state_amplitude(mu, cov, ocp, check_purity=False))**2 + return np.abs(pure_state_amplitude(mu, cov, ocp, check_purity=False)) ** 2 - return density_matrix_element(mu, cov, list(ocp), list(ocp)).real + return density_matrix_element(mu, cov, list(ocp), list(ocp)).real diff --git a/strawberryfields/backends/gaussianbackend/states.py b/strawberryfields/backends/gaussianbackend/states.py index f03df46c6..c82c2f41a 100644 --- a/strawberryfields/backends/gaussianbackend/states.py +++ b/strawberryfields/backends/gaussianbackend/states.py @@ -32,6 +32,7 @@ class GaussianState(BaseGaussianState): mode_names (Sequence): (optional) this argument contains a list providing mode names for each mode in the state """ + def __init__(self, state_data, num_modes, qmat, Amat, mode_names=None): # pylint: disable=too-many-arguments super().__init__(state_data, num_modes, mode_names) @@ -53,13 +54,13 @@ def reduced_dm(self, modes, **kwargs): Returns: array: the reduced density matrix for the specified modes """ - cutoff = kwargs.get('cutoff', 10) - mu, cov = self.reduced_gaussian([modes]) # pylint: disable=unused-variable - cov = cov * 2/self._hbar + cutoff = kwargs.get("cutoff", 10) + mu, cov = self.reduced_gaussian([modes]) # pylint: disable=unused-variable + cov = cov * 2 / self._hbar return fock_amplitudes_one_mode(self._alpha[modes], cov, cutoff) - #========================================== + # ========================================== # The methods below inherit their docstring # from BaseState @@ -67,32 +68,32 @@ def fock_prob(self, n, **kwargs): if len(n) != self._modes: raise ValueError("Fock state must be same length as the number of modes") - cutoff = kwargs.get('cutoff', 10) + cutoff = kwargs.get("cutoff", 10) if sum(n) >= cutoff: - raise ValueError('Cutoff argument must be larger than the sum of photon numbers') + raise ValueError("Cutoff argument must be larger than the sum of photon numbers") ocp = np.uint8(np.array(n)) # get the vector and means and covariance # matrix, 
and normalize so that hbar=2 - mu = self.means() * np.sqrt(2/self.hbar) - cov = self.cov() * 2/self.hbar + mu = self.means() * np.sqrt(2 / self.hbar) + cov = self.cov() * 2 / self.hbar return fock_prob(mu, cov, ocp) def mean_photon(self, mode, **kwargs): mu, cov = self.reduced_gaussian([mode]) - mean = (np.trace(cov) + mu.T @ mu)/(2*self._hbar) - 1/2 - var = (np.trace(cov @ cov) + 2*mu.T @ cov @ mu)/(2*self._hbar**2) - 1/4 + mean = (np.trace(cov) + mu.T @ mu) / (2 * self._hbar) - 1 / 2 + var = (np.trace(cov @ cov) + 2 * mu.T @ cov @ mu) / (2 * self._hbar ** 2) - 1 / 4 return mean, var def fidelity(self, other_state, mode, **kwargs): - mu1 = other_state[0] * 2/np.sqrt(2*self._hbar) - cov1 = other_state[1] * 2/self._hbar + mu1 = other_state[0] * 2 / np.sqrt(2 * self._hbar) + cov1 = other_state[1] * 2 / self._hbar mu2, cov2 = self.reduced_gaussian([mode]) - mu2 *= 2/np.sqrt(2*self._hbar) - cov2 /= self._hbar/2 + mu2 *= 2 / np.sqrt(2 * self._hbar) + cov2 /= self._hbar / 2 return sm_fidelity(mu1, mu2, cov1, cov2) @@ -109,5 +110,5 @@ def fidelity_coherent(self, alpha_list, **kwargs): delta = np.concatenate((delta, delta.conj())) fac = np.sqrt(np.linalg.det(Qi).real) - exp = np.exp(-0.5*np.dot(delta, np.dot(Qi, delta.conj())).real) - return fac*exp + exp = np.exp(-0.5 * np.dot(delta, np.dot(Qi, delta.conj())).real) + return fac * exp diff --git a/strawberryfields/backends/shared_ops.py b/strawberryfields/backends/shared_ops.py index 540f44545..ef8197d79 100644 --- a/strawberryfields/backends/shared_ops.py +++ b/strawberryfields/backends/shared_ops.py @@ -26,13 +26,14 @@ from scipy.special import gammaln as lg from scipy.linalg import qr -DATA_PATH = pkg_resources.resource_filename('strawberryfields', 'backends/data') +DATA_PATH = pkg_resources.resource_filename("strawberryfields", "backends/data") def_type = np.complex128 -#================================+ +# ================================+ # Fock space shared operations | -#================================+ +# ================================+ + @functools.lru_cache() def find_dim_files(regex, D, directory=None, name=""): @@ -52,16 +53,18 @@ def find_dim_files(regex, D, directory=None, name=""): directory = DATA_PATH else: check_dir = os.path.isdir(directory) - if not check_dir: # pragma: no cover + if not check_dir: # pragma: no cover raise NotADirectoryError("Directory {} does not exist!".format(directory)) files = [f for f in os.listdir(directory) if re.match(regex, f)] - avail_dims = sorted([int(re.findall(r'\d+', f)[0]) for f in files]) + avail_dims = sorted([int(re.findall(r"\d+", f)[0]) for f in files]) - idx = bisect(avail_dims, D-1) - if idx+1 > len(avail_dims): - raise FileNotFoundError("File containing {} factors does not exist " - "for dimension {} in directory {}".format(name, D, directory)) + idx = bisect(avail_dims, D - 1) + if idx + 1 > len(avail_dims): + raise FileNotFoundError( + "File containing {} factors does not exist " + "for dimension {} in directory {}".format(name, D, directory) + ) return avail_dims[idx], directory @@ -83,15 +86,16 @@ def generate_bs_factors(D): Args: D (int): generate prefactors for :math:`D` dimensions. 
""" - prefac = np.zeros([D]*5, dtype=def_type) + prefac = np.zeros([D] * 5, dtype=def_type) - for (N, M, n) in itertools.product(*([range(D)]*3)): - m = N+M-n - k = np.arange(n+1) + for (N, M, n) in itertools.product(*([range(D)] * 3)): + m = N + M - n + k = np.arange(n + 1) if 0 <= m < D: # pylint: disable=bad-whitespace - prefac[N,n,M,m,:n+1] = (-1.0)**(N-k) \ - * np.sqrt(binom(n, k)*binom(m, N-k)*binom(N, k)*binom(M, n-k)) + prefac[N, n, M, m, : n + 1] = (-1.0) ** (N - k) * np.sqrt( + binom(n, k) * binom(m, N - k) * binom(N, k) * binom(M, n - k) + ) return prefac @@ -118,7 +122,7 @@ def load_bs_factors(D, directory=None): load_dim, location = find_dim_files(regex, D, directory=directory, name="beamsplitter") filename = "fock_beamsplitter_factors_{}.npz".format(load_dim) prefac = sp.sparse.load_npz(os.path.join(location, filename)) - return np.reshape(prefac.toarray(), [load_dim]*5) + return np.reshape(prefac.toarray(), [load_dim] * 5) def save_bs_factors(prefac, directory=None): @@ -139,13 +143,13 @@ def save_bs_factors(prefac, directory=None): directory = DATA_PATH else: check_dir = os.path.isdir(directory) - if not check_dir: # pragma: no cover + if not check_dir: # pragma: no cover raise NotADirectoryError("Directory {} does not exist!".format(directory)) D = prefac.shape[0] - filename = 'fock_beamsplitter_factors_{}.npz'.format(D) + filename = "fock_beamsplitter_factors_{}.npz".format(D) - prefac_rank2 = np.reshape(prefac, ((D)**4, D), order='C') + prefac_rank2 = np.reshape(prefac, ((D) ** 4, D), order="C") prefac_sparse = sp.sparse.csc_matrix(prefac_rank2) sp.sparse.save_npz(os.path.join(directory, filename), prefac_sparse) @@ -164,7 +168,7 @@ def squeeze_parity(D): Args: D (numpy.array): generate the prefactors for a Fock truncation of :math:`D`. 
""" - k = np.int(np.ceil(D/4) * 4) + k = np.int(np.ceil(D / 4) * 4) v = np.full(k, 1) v[1::2] = 0 v[2::4] = -1 @@ -194,17 +198,21 @@ def generate_squeeze_factors(D): # we only perform the sum when n+N is divisible by 2 # in which case we sum 0 <= k <= min(N,n) - mask = np.logical_and((n+N)%2 == 0, k <= np.minimum(N, n)) + mask = np.logical_and((n + N) % 2 == 0, k <= np.minimum(N, n)) # need to use np.power to avoid taking the root of a negative # in the numerator (these are discarded by the mask anyway) signs = squeeze_parity(D).reshape([D, 1, D]) - logfac = np.where(mask, 0.5*(lg(n+1) + lg(N+1)) - lg(k+1) - lg((n-k)/2+1) - lg((N-k)/2+1), 0) + logfac = np.where( + mask, + 0.5 * (lg(n + 1) + lg(N + 1)) - lg(k + 1) - lg((n - k) / 2 + 1) - lg((N - k) / 2 + 1), + 0, + ) if D <= 600: - prefactor = np.exp(logfac, dtype=np.float64)*signs*mask + prefactor = np.exp(logfac, dtype=np.float64) * signs * mask else: - prefactor = np.exp(logfac, dtype=np.float128)*signs*mask + prefactor = np.exp(logfac, dtype=np.float128) * signs * mask return prefactor @@ -231,9 +239,9 @@ def save_squeeze_factors(prefac, directory=None): raise NotADirectoryError("Directory {} does not exist!".format(directory)) D = prefac.shape[0] - filename = 'fock_squeeze_factors_{}.npz'.format(D) + filename = "fock_squeeze_factors_{}.npz".format(D) - prefac_rank2 = np.reshape(prefac, ((D)**2, D), order='C') + prefac_rank2 = np.reshape(prefac, ((D) ** 2, D), order="C") prefac_sparse = sp.sparse.csc_matrix(prefac_rank2) sp.sparse.save_npz(os.path.join(directory, filename), prefac_sparse) @@ -262,12 +270,13 @@ def load_squeeze_factors(D, directory=None): filename = "fock_squeeze_factors_{}.npz".format(load_dim) prefac = sp.sparse.load_npz(os.path.join(location, filename)) - return np.reshape(prefac.toarray(), [load_dim]*3) + return np.reshape(prefac.toarray(), [load_dim] * 3) -#================================+ +# ================================+ # Phase space shared operations | -#================================+ +# ================================+ + @functools.lru_cache() def rotation_matrix(phi): @@ -278,8 +287,7 @@ def rotation_matrix(phi): Returns: array: :math:`2\times 2` rotation matrix """ - return np.array([[np.cos(phi), -np.sin(phi)], - [np.sin(phi), np.cos(phi)]]) + return np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]]) @functools.lru_cache() @@ -294,8 +302,9 @@ def sympmat(n): array: symplectic matrix """ idm = np.identity(n) - omega = np.concatenate((np.concatenate((0*idm, idm), axis=1), - np.concatenate((-idm, 0*idm), axis=1)), axis=0) + omega = np.concatenate( + (np.concatenate((0 * idm, idm), axis=1), np.concatenate((-idm, 0 * idm), axis=1)), axis=0 + ) return omega @@ -311,10 +320,10 @@ def changebasis(n): Returns: array: :math:`2n\times 2n` matrix """ - m = np.zeros((2*n, 2*n)) + m = np.zeros((2 * n, 2 * n)) for i in range(n): - m[2*i, i] = 1 - m[2*i+1, i+n] = 1 + m[2 * i, i] = 1 + m[2 * i + 1, i + n] = 1 return m @@ -328,9 +337,9 @@ def haar_measure(n): Returns: array: an nxn random matrix """ - z = (sp.randn(n, n) + 1j*sp.randn(n, n))/np.sqrt(2.0) + z = (sp.randn(n, n) + 1j * sp.randn(n, n)) / np.sqrt(2.0) q, r = qr(z) d = sp.diagonal(r) - ph = d/np.abs(d) + ph = d / np.abs(d) q = np.multiply(q, ph, q) return q diff --git a/strawberryfields/backends/states.py b/strawberryfields/backends/states.py index 2697b69e2..08407eea6 100644 --- a/strawberryfields/backends/states.py +++ b/strawberryfields/backends/states.py @@ -32,6 +32,7 @@ indices = string.ascii_lowercase + class BaseState(abc.ABC): 
r"""Abstract base class for the representation of quantum states.""" EQ_TOLERANCE = 1e-10 @@ -43,12 +44,13 @@ def __init__(self, num_modes, mode_names=None): self._pure = None if mode_names is None: - self._modemap = {i:"mode {}".format(i) for i in range(num_modes)} + self._modemap = {i: "mode {}".format(i) for i in range(num_modes)} else: - self._modemap = {i:'{}'.format(j) for i, j in zip(range(num_modes), mode_names)} + self._modemap = {i: "{}".format(j) for i, j in zip(range(num_modes), mode_names)} self._str = "".format( - self.num_modes, self._pure, self._hbar) + self.num_modes, self._pure, self._hbar + ) def __str__(self): return self._str @@ -326,7 +328,7 @@ def p_quad_values(self, mode, xvec, pvec): W = self.wigner(mode, xvec, pvec) y = [] for i in range(0, len(pvec)): - res = simps(W[i, :len(xvec)], xvec) + res = simps(W[i, : len(xvec)], xvec) y.append(res) return np.array(y) @@ -347,7 +349,7 @@ def x_quad_values(self, mode, xvec, pvec): W = self.wigner(mode, xvec, pvec) y = [] for i in range(0, len(xvec)): - res = simps(W[:len(pvec), i], pvec) + res = simps(W[: len(pvec), i], pvec) y.append(res) return np.array(y) @@ -372,10 +374,11 @@ def __init__(self, state_data, num_modes, pure, cutoff_dim, mode_names=None): self._data = state_data self._cutoff = cutoff_dim self._pure = pure - self._basis = 'fock' + self._basis = "fock" self._str = "".format( - self.num_modes, self._cutoff, self._pure, self._hbar) + self.num_modes, self._cutoff, self._pure, self._hbar + ) def __eq__(self, other): """Equality operator for BaseFockState. @@ -422,7 +425,7 @@ def ket(self, **kwargs): if self._pure: return self.data - return None # pragma: no cover + return None # pragma: no cover def dm(self, **kwargs): r"""The numerical density matrix for the quantum state. @@ -435,7 +438,7 @@ def dm(self, **kwargs): left_str = [indices[i] for i in range(0, 2 * self._modes, 2)] right_str = [indices[i] for i in range(1, 2 * self._modes, 2)] out_str = [indices[: 2 * self._modes]] - einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str) + einstr = "".join(left_str + [","] + right_str + ["->"] + out_str) rho = np.einsum(einstr, self.ket(), self.ket().conj()) return rho @@ -457,8 +460,12 @@ def trace(self, **kwargs): return np.vdot(self.ket(), self.ket()).real # # need some extra steps to trace over multimode matrices - eqn_indices = [[indices[idx]] * 2 for idx in range(self._modes)] #doubled indices [['i','i'],['j','j'], ... ] - eqn = "".join(chain.from_iterable(eqn_indices)) # flatten indices into a single string 'iijj...' + eqn_indices = [ + [indices[idx]] * 2 for idx in range(self._modes) + ] # doubled indices [['i','i'],['j','j'], ... ] + eqn = "".join( + chain.from_iterable(eqn_indices) + ) # flatten indices into a single string 'iijj...' 
return np.einsum(eqn, self.dm()).real def all_fock_probs(self, **kwargs): @@ -478,8 +485,8 @@ def all_fock_probs(self, **kwargs): """ # pylint: disable=unused-argument if self._pure: - s = np.ravel(self.ket()) # into 1D array - return np.reshape((s * s.conj()).real, [self._cutoff]*self._modes) + s = np.ravel(self.ket()) # into 1D array + return np.reshape((s * s.conj()).real, [self._cutoff] * self._modes) s = self.dm() num_axes = len(s.shape) @@ -489,16 +496,16 @@ def all_fock_probs(self, **kwargs): transpose_list = evens + odds probs = np.diag(np.reshape(np.transpose(s, transpose_list), [flat_size, flat_size])).real - return np.reshape(probs, [self._cutoff]*self._modes) + return np.reshape(probs, [self._cutoff] * self._modes) - #===================================================== + # ===================================================== # the following methods are overwritten from BaseState def reduced_dm(self, modes, **kwargs): # pylint: disable=unused-argument if modes == list(range(self._modes)): # reduced state is full state - return self.dm() # pragma: no cover + return self.dm() # pragma: no cover if isinstance(modes, int): modes = [modes] @@ -506,8 +513,9 @@ def reduced_dm(self, modes, **kwargs): raise ValueError("The specified modes cannot be duplicated.") if len(modes) > self._modes: - raise ValueError("The number of specified modes cannot " - "be larger than the number of subsystems.") + raise ValueError( + "The number of specified modes cannot " "be larger than the number of subsystems." + ) # reduce rho down to specified subsystems keep_indices = indices[: 2 * len(modes)] @@ -521,7 +529,7 @@ def reduced_dm(self, modes, **kwargs): ind.insert(m, keep_indices[2 * ctr : 2 * (ctr + 1)]) ctr += 1 - indStr = ''.join(ind) + '->' + keep_indices + indStr = "".join(ind) + "->" + keep_indices return np.einsum(indStr, self.dm()) def fock_prob(self, n, **kwargs): @@ -533,16 +541,16 @@ def fock_prob(self, n, **kwargs): raise ValueError("Can't get distribution beyond truncation level") if self._pure: - return np.abs(self.ket()[tuple(n)])**2 + return np.abs(self.ket()[tuple(n)]) ** 2 - return self.dm()[tuple([n[i//2] for i in range(len(n)*2)])].real + return self.dm()[tuple([n[i // 2] for i in range(len(n) * 2)])].real def mean_photon(self, mode, **kwargs): # pylint: disable=unused-argument n = np.arange(self._cutoff) probs = np.diagonal(self.reduced_dm(mode)) - mean = np.sum(n*probs).real - var = np.sum(n**2*probs).real - mean**2 + mean = np.sum(n * probs).real + var = np.sum(n ** 2 * probs).real - mean ** 2 return mean, var def fidelity(self, other_state, mode, **kwargs): @@ -553,10 +561,10 @@ def fidelity(self, other_state, mode, **kwargs): raise Exception("fidelity method can only support up to {} modes".format(max_indices)) left_indices = indices[:mode] - eqn_left = "".join([i*2 for i in left_indices]) - reduced_dm_indices = indices[mode:mode + 2] - right_indices = indices[mode + 2:self._modes + 1] - eqn_right = "".join([i*2 for i in right_indices]) + eqn_left = "".join([i * 2 for i in left_indices]) + reduced_dm_indices = indices[mode : mode + 2] + right_indices = indices[mode + 2 : self._modes + 1] + eqn_right = "".join([i * 2 for i in right_indices]) eqn = "".join([eqn_left, reduced_dm_indices, eqn_right]) + "->" + reduced_dm_indices rho_reduced = np.einsum(eqn, self.dm()) @@ -577,7 +585,7 @@ def fidelity_coherent(self, alpha_list, **kwargs): s = self.dm() if not hasattr(alpha_list, "__len__"): - alpha_list = [alpha_list] # pragma: no cover + alpha_list = [alpha_list] # pragma: no 
cover if len(alpha_list) != self._modes: raise ValueError("The number of alpha values must match the number of modes.") @@ -589,16 +597,20 @@ def fidelity_coherent(self, alpha_list, **kwargs): if self._modes == 1: multi_cohs_vec = coh(alpha_list[0], self._cutoff) else: - multi_cohs_list = [coh(alpha_list[idx], dim) for idx, dim in enumerate(s.shape[::mode_size])] - eqn = ",".join(indices[:self._modes]) + "->" + indices[:self._modes] - multi_cohs_vec = np.einsum(eqn, *multi_cohs_list) # tensor product of specified coherent states + multi_cohs_list = [ + coh(alpha_list[idx], dim) for idx, dim in enumerate(s.shape[::mode_size]) + ] + eqn = ",".join(indices[: self._modes]) + "->" + indices[: self._modes] + multi_cohs_vec = np.einsum( + eqn, *multi_cohs_list + ) # tensor product of specified coherent states if self.is_pure: ovlap = np.vdot(multi_cohs_vec, s) return np.abs(ovlap) ** 2 - bra_indices = indices[:2 * self._modes:2] - ket_indices = indices[1:2 * self._modes:2] + bra_indices = indices[: 2 * self._modes : 2] + ket_indices = indices[1 : 2 * self._modes : 2] new_eqn_lhs = ",".join([bra_indices, ket_indices]) new_eqn_rhs = "".join(bra_indices[idx] + ket_indices[idx] for idx in range(self._modes)) outer_prod_eqn = new_eqn_lhs + "->" + new_eqn_rhs @@ -630,12 +642,12 @@ def wigner(self, mode, xvec, pvec): """ rho = self.reduced_dm(mode) Q, P = np.meshgrid(xvec, pvec) - A = (Q + P * 1.0j) / (2*np.sqrt(self._hbar/2)) + A = (Q + P * 1.0j) / (2 * np.sqrt(self._hbar / 2)) Wlist = np.array([np.zeros(np.shape(A), dtype=complex) for k in range(self._cutoff)]) # Wigner function for |0><0| - Wlist[0] = np.exp(-2.0 * np.abs(A)**2) / np.pi + Wlist[0] = np.exp(-2.0 * np.abs(A) ** 2) / np.pi # W = rho(0,0)W(|0><0|) W = np.real(rho[0, 0]) * np.real(Wlist[0]) @@ -647,8 +659,7 @@ def wigner(self, mode, xvec, pvec): for m in range(1, self._cutoff): temp = copy(Wlist[m]) # Wlist[m] = Wigner function for |m>ijklmn' (for 3 modes) return np.einsum(ind, *ops) # determine modes with quadratic expectation values - nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(linear_coeff.nonzero()[0], self._modes)]) + nonzero = np.concatenate( + [np.mod(A.nonzero()[0], self._modes), np.mod(linear_coeff.nonzero()[0], self._modes)] + ) ex_modes = list(set(nonzero)) num_modes = len(ex_modes) if not ex_modes: # only a constant term was provided - return k, 0. + return k, 0.0 # There are non-zero elements of A and/or d # therefore there are quadratic and/or linear terms. 
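As an illustrative aside, the following minimal, self-contained sketch reproduces the doubled-index einsum convention that the BaseFockState hunks above rely on for trace, reduced_dm and mean_photon; the two-mode random ket, the cutoff of 3, and the variable names are invented example data for this sketch, not part of the patched modules.

import numpy as np

cutoff = 3
ket = np.random.randn(cutoff, cutoff) + 1j * np.random.randn(cutoff, cutoff)
ket /= np.linalg.norm(ket)                        # normalized two-mode pure state
rho = np.einsum("ac,bd->abcd", ket, ket.conj())   # dm indices: [mode0, mode0*, mode1, mode1*]

trace = np.einsum("iijj", rho).real               # doubled indices 'iijj' give the full trace (~1.0)
rho_0 = np.einsum("abcc->ab", rho)                # trace out mode 1, keep the reduced dm of mode 0

n = np.arange(cutoff)
probs = np.diagonal(rho_0).real                   # photon-number distribution of mode 0
mean = np.sum(n * probs)                          # mean photon number, as in mean_photon()
var = np.sum(n ** 2 * probs) - mean ** 2          # photon-number variance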
@@ -747,14 +760,14 @@ def expand_dims(op, n, modes): # generate vector of quadrature operators # this array will have shape [2*num_modes] + [dim]*(2*num_modes) - r = np.empty([2*num_modes] + [dim]*(2*num_modes), dtype=np.complex128) + r = np.empty([2 * num_modes] + [dim] * (2 * num_modes), dtype=np.complex128) for n in range(num_modes): r[n] = expand_dims(x, n, num_modes) - r[num_modes+n] = expand_dims(p, n, num_modes) + r[num_modes + n] = expand_dims(p, n, num_modes) # reduce the size of A so that we only consider modes # which we need to calculate the expectation value for - rows = ex_modes + [i+self._modes for i in ex_modes] + rows = ex_modes + [i + self._modes for i in ex_modes] quad_coeffs = A[:, rows][rows] quad_coeffs[num_modes:, :num_modes] = -quad_coeffs[num_modes:, :num_modes] quad_coeffs[:num_modes, num_modes:] = -quad_coeffs[:num_modes, num_modes:] @@ -771,13 +784,18 @@ def expand_dims(op, n, modes): # So, in effect, matrix of quadratic coefficients A acts only on index a, # this index is then summed, and then each mode of r, A@r undergoes # matrix multiplication - ind1 = indices[:2*num_modes+1] - ind2 = ind1[0] + ''.join([str(i)+str(j) for i, j in zip(ind1[2::2], indices[2*num_modes+1:3*num_modes+1])]) - ind3 = ''.join([str(i)+str(j) for i, j in zip(ind1[1::2], ind2[2::2])]) + ind1 = indices[: 2 * num_modes + 1] + ind2 = ind1[0] + "".join( + [ + str(i) + str(j) + for i, j in zip(ind1[2::2], indices[2 * num_modes + 1 : 3 * num_modes + 1]) + ] + ) + ind3 = "".join([str(i) + str(j) for i, j in zip(ind1[1::2], ind2[2::2])]) ind = "{},{}->{}".format(ind1, ind2, ind3) - if np.allclose(quad_coeffs, 0.): - poly_op = np.zeros([dim]*(2*num_modes), dtype=np.complex128) + if np.allclose(quad_coeffs, 0.0): + poly_op = np.zeros([dim] * (2 * num_modes), dtype=np.complex128) else: # Einsum above applied to to r,Ar # This einsum sums over all quadrature operators, and also applies matrix @@ -790,19 +808,19 @@ def expand_dims(op, n, modes): # add constant term if k != 0: - poly_op += k*expand_dims(np.eye(dim), 0, num_modes) + poly_op += k * expand_dims(np.eye(dim), 0, num_modes) # calculate Op^2 ind = "{},{}->{}".format(ind1[1:], ind2[1:], ind3) poly_op_sq = np.einsum(ind, poly_op, poly_op) # truncate down - sl = tuple([slice(0, self._cutoff)]*(2*num_modes)) + sl = tuple([slice(0, self._cutoff)] * (2 * num_modes)) poly_op = poly_op[sl] poly_op_sq = poly_op_sq[sl] ind1 = ind1[:-1] - ind2 = ''.join([str(j)+str(i) for i, j in zip(ind1[::2], ind1[1::2])]) + ind2 = "".join([str(j) + str(i) for i, j in zip(ind1[::2], ind1[1::2])]) ind = "{},{}".format(ind1, ind2) # calculate expectation value, Tr(Op @ rho) @@ -810,7 +828,7 @@ def expand_dims(op, n, modes): mean = np.einsum(ind, poly_op, rho).real # calculate variance Tr(Op^2 @ rho) - Tr(Op @ rho)^2 - var = np.einsum(ind, poly_op_sq, rho).real - mean**2 + var = np.einsum(ind, poly_op_sq, rho).real - mean ** 2 return mean, var @@ -830,23 +848,28 @@ class BaseGaussianState(BaseState): mode_names (Sequence): (optional) this argument contains a list providing mode names for each mode in the state """ + def __init__(self, state_data, num_modes, mode_names=None): super().__init__(num_modes, mode_names) self._data = state_data # vector of means and covariance matrix, using frontend x,p scaling - self._mu = self._data[0] * np.sqrt(self._hbar/2) - self._cov = self._data[1] * (self._hbar/2) + self._mu = self._data[0] * np.sqrt(self._hbar / 2) + self._cov = self._data[1] * (self._hbar / 2) # complex displacements of the Gaussian state - self._alpha = 
self._mu[:self._modes] + 1j*self._mu[self._modes:] - self._alpha /= np.sqrt(2*self._hbar) + self._alpha = self._mu[: self._modes] + 1j * self._mu[self._modes :] + self._alpha /= np.sqrt(2 * self._hbar) - self._pure = np.abs(np.linalg.det(self._cov) - (self._hbar/2)**(2*self._modes)) < self.EQ_TOLERANCE + self._pure = ( + np.abs(np.linalg.det(self._cov) - (self._hbar / 2) ** (2 * self._modes)) + < self.EQ_TOLERANCE + ) - self._basis = 'gaussian' + self._basis = "gaussian" self._str = "".format( - self.num_modes, self._pure, self._hbar) + self.num_modes, self._pure, self._hbar + ) def __eq__(self, other): """Equality operator for BaseGaussianState. @@ -858,15 +881,16 @@ def __eq__(self, other): Args: other (BaseGaussianState): BaseGaussianState to compare against. """ - #pylint: disable=protected-access + # pylint: disable=protected-access if not isinstance(other, type(self)): return False if self.num_modes != other.num_modes: return False - if np.allclose(self._mu, other._mu, atol=self.EQ_TOLERANCE, rtol=0) and \ - np.allclose(self._cov, other._cov, atol=self.EQ_TOLERANCE, rtol=0): + if np.allclose(self._mu, other._mu, atol=self.EQ_TOLERANCE, rtol=0) and np.allclose( + self._cov, other._cov, atol=self.EQ_TOLERANCE, rtol=0 + ): return True return False @@ -933,10 +957,11 @@ def reduced_gaussian(self, modes): raise ValueError("The specified modes cannot be duplicated.") if len(modes) > self._modes: - raise ValueError("The number of specified modes cannot " - "be larger than the number of subsystems.") + raise ValueError( + "The number of specified modes cannot " "be larger than the number of subsystems." + ) - ind = np.concatenate([np.array(modes), np.array(modes)+self._modes]) + ind = np.concatenate([np.array(modes), np.array(modes) + self._modes]) rows = ind.reshape(-1, 1) cols = ind.reshape(1, -1) @@ -955,8 +980,8 @@ def is_coherent(self, mode, tol=1e-10): Returns: bool: True if and only if the state is a coherent state. """ - mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable - cov /= self._hbar/2 + mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable + cov /= self._hbar / 2 return np.allclose(cov, np.identity(2), atol=tol, rtol=0) def displacement(self, modes=None): @@ -971,7 +996,7 @@ def displacement(self, modes=None): """ if modes is None: modes = list(range(self._modes)) - elif isinstance(modes, int): # pragma: no cover + elif isinstance(modes, int): # pragma: no cover modes = [modes] return self._alpha[list(modes)] @@ -986,8 +1011,8 @@ def is_squeezed(self, mode, tol=1e-6): Returns: bool: True if and only if the state is a squeezed state. 
""" - mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable - cov /= self._hbar/2 + mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable + cov /= self._hbar / 2 return np.any(np.abs(cov - np.identity(2)) > tol) def squeezing(self, modes=None): @@ -1002,27 +1027,27 @@ def squeezing(self, modes=None): """ if modes is None: modes = list(range(self._modes)) - elif isinstance(modes, int): # pragma: no cover + elif isinstance(modes, int): # pragma: no cover modes = [modes] res = [] for i in modes: - mu, cov = self.reduced_gaussian([i]) # pylint: disable=unused-variable - cov /= self._hbar/2 + mu, cov = self.reduced_gaussian([i]) # pylint: disable=unused-variable + cov /= self._hbar / 2 tr = np.trace(cov) - r = np.arccosh(tr/2)/2 + r = np.arccosh(tr / 2) / 2 - if cov[0, 1] == 0.: + if cov[0, 1] == 0.0: phi = 0 else: - phi = -np.arcsin(2*cov[0, 1] / np.sqrt((tr-2)*(tr+2))) + phi = -np.arcsin(2 * cov[0, 1] / np.sqrt((tr - 2) * (tr + 2))) res.append((r, phi)) return res - #===================================================== + # ===================================================== # the following methods are overwritten from BaseState def wigner(self, mode, xvec, pvec): @@ -1047,33 +1072,35 @@ def quad_expectation(self, mode, phi=0, **kwargs): def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): if A is None: - A = np.zeros([2*self._modes, 2*self._modes]) + A = np.zeros([2 * self._modes, 2 * self._modes]) - if A.shape != (2*self._modes, 2*self._modes): + if A.shape != (2 * self._modes, 2 * self._modes): raise ValueError("Matrix of quadratic coefficients A must be of size 2Nx2N.") if not np.allclose(A.T, A): raise ValueError("Matrix of quadratic coefficients A must be symmetric.") if d is not None: - if d.shape != (2*self._modes,): + if d.shape != (2 * self._modes,): raise ValueError("Vector of linear coefficients d must be of length 2N.") else: - d = np.zeros([2*self._modes]) + d = np.zeros([2 * self._modes]) # determine modes with quadratic expectation values - nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(d.nonzero()[0], self._modes)]) + nonzero = np.concatenate( + [np.mod(A.nonzero()[0], self._modes), np.mod(d.nonzero()[0], self._modes)] + ) ex_modes = list(set(nonzero)) # reduce the size of A so that we only consider modes # which we need to calculate the expectation value for - rows = ex_modes + [i+self._modes for i in ex_modes] + rows = ex_modes + [i + self._modes for i in ex_modes] num_modes = len(ex_modes) quad_coeffs = A[:, rows][rows] if not ex_modes: # only a constant term was provided - return k, 0. 
+ return k, 0.0 mu = self._mu cov = self._cov @@ -1082,7 +1109,7 @@ def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # rotate all modes of the covariance matrix and vector of means R = _R(phi) C = changebasis(self._modes) - rot = C.T @ block_diag(*([R]*self._modes)) @ C + rot = C.T @ block_diag(*([R] * self._modes)) @ C mu = rot.T @ mu cov = rot.T @ cov @ rot @@ -1091,7 +1118,7 @@ def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # E[P(r)]_(mu,cov) = E(Q(r+mu)]_(0,cov) # = E[rT.A.r + rT.(2A.mu+d) + (muT.A.mu+muT.d+cI)]_(0,cov) # = E[rT.A.r + rT.d' + k']_(0,cov) - d2 = 2*A @ mu + d + d2 = 2 * A @ mu + d k2 = mu.T @ A @ mu + mu.T @ d + k # expectation value E[P(r)]_{mu=0} = tr(A.cov) + muT.A.mu + muT.d + k|_{mu=0} @@ -1099,14 +1126,16 @@ def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): mean = np.trace(A @ cov) + k2 # variance Var[P(r)]_{mu=0} = 2tr(A.cov.A.cov) + 4*muT.A.cov.A.mu + dT.cov.d|_{mu=0} # = 2tr(A.cov.A.cov) + dT.cov.d - var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2 + var = 2 * np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2 # Correction term to account for incorrect symmetric ordering in the variance. # This occurs because Var[S(P(r))] = Var[P(r)] - Σ_{m1, m2} |hbar*A_{(m1, m1+N),(m2, m2+N)}|, # where m1, m2 are all possible mode numbers, and N is the total number of modes. # Therefore, the correction term is the sum of the determinants of 2x2 submatrices of A. - modes = np.arange(2*num_modes).reshape(2, -1).T - var -= np.sum([np.linalg.det(self._hbar*quad_coeffs[:, m][n]) for m in modes for n in modes]) + modes = np.arange(2 * num_modes).reshape(2, -1).T + var -= np.sum( + [np.linalg.det(self._hbar * quad_coeffs[:, m][n]) for m in modes for n in modes] + ) return mean, var diff --git a/strawberryfields/backends/tfbackend/__init__.py b/strawberryfields/backends/tfbackend/__init__.py index f456a43b9..edd09e0be 100644 --- a/strawberryfields/backends/tfbackend/__init__.py +++ b/strawberryfields/backends/tfbackend/__init__.py @@ -182,7 +182,7 @@ def excepthook(type, value, traceback): """Exception hook to suppress superfluous exceptions""" - #pylint: disable=unused-argument + # pylint: disable=unused-argument print(value) diff --git a/strawberryfields/backends/tfbackend/backend.py b/strawberryfields/backends/tfbackend/backend.py index 12adf4220..d968f854f 100644 --- a/strawberryfields/backends/tfbackend/backend.py +++ b/strawberryfields/backends/tfbackend/backend.py @@ -26,14 +26,15 @@ from .ops import _check_for_eval, mixed, partial_trace, reorder_modes from .states import FockStateTF + class TFBackend(BaseFock): r"""Implements a simulation of quantum optical circuits in a truncated Fock basis using `TensorFlow `_, returning a :class:`~.FockStateTF` state object. """ - short_name = 'tf' - circuit_spec = 'tf' + short_name = "tf" + circuit_spec = "tf" def __init__(self, graph=None): """Initialize a TFBackend object. 
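To make the algebra in the Gaussian poly_quad_expectation hunks above concrete, here is a small standalone sketch of the same mean/variance formulas, assuming a two-mode vacuum state with hbar = 2 and the polynomial P(r) = x_1 ** 2; the phi rotation and the symmetric-ordering variance correction applied in the patched code are deliberately omitted, so this is a simplified illustration rather than the library's implementation.

import numpy as np

N = 2                                  # number of modes (example value)
hbar = 2
mu = np.zeros(2 * N)                   # vector of means, ordered (x_1,...,x_N, p_1,...,p_N)
cov = (hbar / 2) * np.eye(2 * N)       # vacuum covariance in the frontend x,p scaling
A = np.zeros((2 * N, 2 * N))
A[0, 0] = 1.0                          # quadratic coefficients for P(r) = x_1 ** 2
d = np.zeros(2 * N)                    # no linear term
k = 0.0                                # no constant term

d2 = 2 * A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
mean = np.trace(A @ cov) + k2                             # E[P(r)] -> 1.0 for the vacuum
var = 2 * np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2   # Var[P(r)] before the correction -> 2.0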
@@ -50,8 +51,10 @@ def __init__(self, graph=None): else: self._graph = graph self._init_modes = None #: int: initial number of modes in the circuit - self._modemap = None #: Modemap: maps external mode indices to internal ones - self.circuit = None #: ~.tfbackend.circuit.Circuit: representation of the simulated quantum state + self._modemap = None #: Modemap: maps external mode indices to internal ones + self.circuit = ( + None #: ~.tfbackend.circuit.Circuit: representation of the simulated quantum state + ) def _remap_modes(self, modes): if isinstance(modes, int): @@ -62,7 +65,7 @@ def _remap_modes(self, modes): map_ = self._modemap.show() submap = [map_[m] for m in modes] if not self._modemap.valid(modes) or None in submap: - raise ValueError('The specified modes are not valid.') + raise ValueError("The specified modes are not valid.") remapped_modes = self._modemap.remap(modes) if was_int: @@ -89,9 +92,9 @@ def begin_circuit(self, num_subsystems, **kwargs): pure (bool): If True (default), use a pure state representation (otherwise will use a mixed state representation). batch_size (None or int): Size of the batch-axis dimension. If None, no batch-axis will be used. """ - cutoff_dim = kwargs.get('cutoff_dim', None) - pure = kwargs.get('pure', True) - batch_size = kwargs.get('batch_size', None) + cutoff_dim = kwargs.get("cutoff_dim", None) + pure = kwargs.get("pure", True) + batch_size = kwargs.get("batch_size", None) if cutoff_dim is None: raise ValueError("Argument 'cutoff_dim' must be passed to the Tensorflow backend") @@ -103,9 +106,11 @@ def begin_circuit(self, num_subsystems, **kwargs): if not isinstance(pure, bool): raise ValueError("Argument 'pure' must be either True or False") if batch_size == 1: - raise ValueError("batch_size of 1 not supported, please use different batch_size or set batch_size=None") + raise ValueError( + "batch_size of 1 not supported, please use different batch_size or set batch_size=None" + ) - with tf.name_scope('Begin_circuit'): + with tf.name_scope("Begin_circuit"): self._modemap = ModeMap(num_subsystems) circuit = Circuit(self._graph, num_subsystems, cutoff_dim, pure, batch_size) @@ -131,14 +136,16 @@ def reset(self, pure=True, **kwargs): If False, then the circuit is reset to its initial state, but ops that have already been declared are still accessible. 
""" - hard = kwargs.pop('hard', True) + hard = kwargs.pop("hard", True) if hard: tf.reset_default_graph() self._graph = tf.get_default_graph() - with tf.name_scope('Reset'): + with tf.name_scope("Reset"): self._modemap.reset() - self.circuit.reset(graph=self._graph, num_subsystems=self._init_modes, pure=pure, **kwargs) + self.circuit.reset( + graph=self._graph, num_subsystems=self._init_modes, pure=pure, **kwargs + ) def get_cutoff_dim(self): return self.circuit.cutoff_dim @@ -148,60 +155,60 @@ def get_modes(self): return [i for i, j in enumerate(self._modemap._map) if j is not None] def prepare_vacuum_state(self, mode): - with tf.name_scope('Prepare_vacuum'): + with tf.name_scope("Prepare_vacuum"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_vacuum_state(remapped_mode) def prepare_coherent_state(self, alpha, mode): - with tf.name_scope('Prepare_coherent'): + with tf.name_scope("Prepare_coherent"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_coherent_state(alpha, remapped_mode) def prepare_squeezed_state(self, r, phi, mode): - with tf.name_scope('Prepare_squeezed'): + with tf.name_scope("Prepare_squeezed"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_squeezed_state(r, phi, remapped_mode) def prepare_displaced_squeezed_state(self, alpha, r, phi, mode): - with tf.name_scope('Prepare_displaced_squeezed'): + with tf.name_scope("Prepare_displaced_squeezed"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_displaced_squeezed_state(alpha, r, phi, remapped_mode) def prepare_fock_state(self, n, mode): - with tf.name_scope('Prepare_fock'): + with tf.name_scope("Prepare_fock"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_fock_state(n, remapped_mode) def prepare_ket_state(self, state, modes): - with tf.name_scope('Prepare_state'): + with tf.name_scope("Prepare_state"): self.circuit.prepare_multimode(state, self._remap_modes(modes), True) def prepare_dm_state(self, state, modes): - with tf.name_scope('Prepare_state'): + with tf.name_scope("Prepare_state"): self.circuit.prepare_multimode(state, self._remap_modes(modes), False) def prepare_thermal_state(self, nbar, mode): - with tf.name_scope('Prepare_thermal'): + with tf.name_scope("Prepare_thermal"): remapped_mode = self._remap_modes(mode) self.circuit.prepare_thermal_state(nbar, remapped_mode) def rotation(self, phi, mode): - with tf.name_scope('Rotation'): + with tf.name_scope("Rotation"): remapped_mode = self._remap_modes(mode) self.circuit.phase_shift(phi, remapped_mode) def displacement(self, alpha, mode): - with tf.name_scope('Displacement'): + with tf.name_scope("Displacement"): remapped_mode = self._remap_modes(mode) self.circuit.displacement(alpha, remapped_mode) def squeeze(self, z, mode): - with tf.name_scope('Squeeze'): + with tf.name_scope("Squeeze"): remapped_mode = self._remap_modes(mode) self.circuit.squeeze(z, remapped_mode) def beamsplitter(self, t, r, mode1, mode2): - with tf.name_scope('Beamsplitter'): + with tf.name_scope("Beamsplitter"): if isinstance(t, complex): raise ValueError("Beamsplitter transmittivity t must be a float.") if isinstance(t, tf.Tensor): @@ -211,22 +218,22 @@ def beamsplitter(self, t, r, mode1, mode2): self.circuit.beamsplitter(t, r, remapped_modes[0], remapped_modes[1]) def loss(self, T, mode): - with tf.name_scope('Loss'): + with tf.name_scope("Loss"): remapped_mode = self._remap_modes(mode) self.circuit.loss(T, remapped_mode) def cubic_phase(self, gamma, mode): - with tf.name_scope('Cubic_phase'): + with 
tf.name_scope("Cubic_phase"): remapped_mode = self._remap_modes(mode) self.circuit.cubic_phase(gamma, remapped_mode) def kerr_interaction(self, kappa, mode): - with tf.name_scope('Kerr_interaction'): + with tf.name_scope("Kerr_interaction"): remapped_mode = self._remap_modes(mode) self.circuit.kerr_interaction(kappa, remapped_mode) def cross_kerr_interaction(self, kappa, mode1, mode2): - with tf.name_scope('Cross-Kerr_interaction'): + with tf.name_scope("Cross-Kerr_interaction"): remapped_modes = self._remap_modes([mode1, mode2]) self.circuit.cross_kerr_interaction(kappa, remapped_modes[0], remapped_modes[1]) @@ -243,7 +250,7 @@ def state(self, modes=None, **kwargs): Returns: FockStateTF: state description """ - with tf.name_scope('State'): + with tf.name_scope("State"): s = self.circuit.state pure = self.circuit.state_is_pure num_modes = self.circuit.num_modes @@ -260,7 +267,9 @@ def state(self, modes=None, **kwargs): if len(modes) != len(set(modes)): raise ValueError("The specified modes cannot be duplicated.") if len(modes) > num_modes: - raise ValueError("The number of specified modes cannot be larger than the number of subsystems.") + raise ValueError( + "The number of specified modes cannot be larger than the number of subsystems." + ) if pure: # convert to mixed state representation @@ -278,7 +287,9 @@ def state(self, modes=None, **kwargs): # unless the modes were requested in order, we need to swap indices around if modes != sorted(modes): mode_permutation = np.argsort(np.argsort(modes)) - reduced_state = reorder_modes(reduced_state, mode_permutation, reduced_state_pure, batched) + reduced_state = reorder_modes( + reduced_state, mode_permutation, reduced_state_pure, batched + ) evaluate_results, session, feed_dict, close_session = _check_for_eval(kwargs) if evaluate_results: @@ -289,9 +300,16 @@ def state(self, modes=None, **kwargs): s = reduced_state modenames = ["q[{}]".format(i) for i in np.array(self.get_modes())[modes]] - state_ = FockStateTF(s, len(modes), pure, self.circuit.cutoff_dim, - graph=self._graph, batched=batched, - mode_names=modenames, eval=evaluate_results) + state_ = FockStateTF( + s, + len(modes), + pure, + self.circuit.cutoff_dim, + graph=self._graph, + batched=batched, + mode_names=modenames, + eval=evaluate_results, + ) return state_ def measure_fock(self, modes, shots=1, select=None, **kwargs): @@ -308,9 +326,10 @@ def measure_fock(self, modes, shots=1, select=None, **kwargs): tuple[int] or tuple[Tensor]: measurement outcomes """ if shots != 1: - raise NotImplementedError("TF backend currently does not support " - "shots != 1 for Fock measurement") - with tf.name_scope('Measure_fock'): + raise NotImplementedError( + "TF backend currently does not support " "shots != 1 for Fock measurement" + ) + with tf.name_scope("Measure_fock"): remapped_modes = self._remap_modes(modes) meas = self.circuit.measure_fock(remapped_modes, select=select, **kwargs) return meas @@ -332,15 +351,16 @@ def measure_homodyne(self, phi, mode, shots=1, select=None, **kwargs): float or tf.Tensor: measurement outcome """ if shots != 1: - raise NotImplementedError("TF backend currently does not support " - "shots != 1 for homodyne measurement") - with tf.name_scope('Measure_homodyne'): + raise NotImplementedError( + "TF backend currently does not support " "shots != 1 for homodyne measurement" + ) + with tf.name_scope("Measure_homodyne"): remapped_mode = self._remap_modes(mode) meas = self.circuit.measure_homodyne(phi, remapped_mode, select, **kwargs) return meas def is_vacuum(self, 
tol=0.0, **kwargs): - with tf.name_scope('Is_vacuum'): + with tf.name_scope("Is_vacuum"): with self.circuit.graph.as_default(): vac_elem = self.circuit.vacuum_element() if "eval" in kwargs and kwargs["eval"] is False: @@ -350,11 +370,11 @@ def is_vacuum(self, tol=0.0, **kwargs): v = sess.run(vac_elem) sess.close() - result = np.abs(v-1) <= tol + result = np.abs(v - 1) <= tol return result def del_mode(self, modes): - with tf.name_scope('Del_mode'): + with tf.name_scope("Del_mode"): remapped_modes = self._remap_modes(modes) if isinstance(remapped_modes, int): remapped_modes = [remapped_modes] @@ -362,7 +382,7 @@ def del_mode(self, modes): self._modemap.delete(modes) def add_mode(self, n=1): - with tf.name_scope('Add_mode'): + with tf.name_scope("Add_mode"): self.circuit.add_mode(n) self._modemap.add(n) diff --git a/strawberryfields/backends/tfbackend/circuit.py b/strawberryfields/backends/tfbackend/circuit.py index 1ee387bf6..acd50691c 100644 --- a/strawberryfields/backends/tfbackend/circuit.py +++ b/strawberryfields/backends/tfbackend/circuit.py @@ -41,17 +41,27 @@ from . import ops + class Circuit: """Base class for representing and operating on a collection of CV quantum optics modes in the Fock basis. The modes are initialized in the (multimode) vacuum state, using the Fock representation with given cutoff_dim. The state of the modes is manipulated by calling the various methods.""" + # pylint: disable=too-many-instance-attributes,too-many-public-methods def __init__(self, graph, num_modes, cutoff_dim, pure, batch_size): - self._graph = None # will be set when reset is called below, but reset needs something to compare to + self._graph = ( + None # will be set when reset is called below, but reset needs something to compare to + ) self._hbar = 2 - self.reset(graph=graph, num_subsystems=num_modes, pure=pure, cutoff_dim=cutoff_dim, batch_size=batch_size) + self.reset( + graph=graph, + num_subsystems=num_modes, + pure=pure, + cutoff_dim=cutoff_dim, + batch_size=batch_size, + ) def _make_vac_states(self, cutoff_dim): """Make vacuum state tensors for the underlying graph""" @@ -59,10 +69,14 @@ def _make_vac_states(self, cutoff_dim): one = tf.cast([1.0], ops.def_type) v = tf.scatter_nd([[0]], one, [cutoff_dim]) self._single_mode_pure_vac = v - self._single_mode_mixed_vac = tf.einsum('i,j->ij', v, v) + self._single_mode_mixed_vac = tf.einsum("i,j->ij", v, v) if self._batched: - self._single_mode_pure_vac = tf.stack([self._single_mode_pure_vac] * self._batch_size) - self._single_mode_mixed_vac = tf.stack([self._single_mode_mixed_vac] * self._batch_size) + self._single_mode_pure_vac = tf.stack( + [self._single_mode_pure_vac] * self._batch_size + ) + self._single_mode_mixed_vac = tf.stack( + [self._single_mode_mixed_vac] * self._batch_size + ) def _update_state(self, new_state): """Helper function to update the state history and the current state""" @@ -81,7 +95,9 @@ def _valid_modes(self, modes): if mode < 0: raise ValueError("Specified mode number(s) cannot be negative.") elif mode >= self._num_modes: - raise ValueError("Specified mode number(s) are not compatible with number of modes.") + raise ValueError( + "Specified mode number(s) are not compatible with number of modes." 
+ ) if len(modes) != len(set(modes)): raise ValueError("The specified modes cannot appear multiple times.") @@ -105,9 +121,11 @@ def _replace_and_update(self, replacement, modes): num_modes = len(self._state.shape) - batch_offset if not self._state_is_pure: - num_modes = int(num_modes/2) + num_modes = int(num_modes / 2) - new_state = ops.replace_modes(replacement, modes, self._state, self._state_is_pure, self._batched) + new_state = ops.replace_modes( + replacement, modes, self._state, self._state_is_pure, self._batched + ) self._update_state(new_state) # update purity depending on whether we have replaced all modes or a subset @@ -142,7 +160,7 @@ def _maybe_batch(self, param, convert_to_tensor=True): if not self._batched: broadcast_p = p else: - if len(p.shape) == 0: # pylint: disable=len-as-condition + if len(p.shape) == 0: # pylint: disable=len-as-condition # scalar broadcast_p = np.concatenate([np.expand_dims(p, 0)] * self._batch_size) elif len(p.shape) == 1: @@ -155,7 +173,11 @@ def _maybe_batch(self, param, convert_to_tensor=True): shape_err = True if shape_err: - raise ValueError("Parameter can be either a scalar or a vector of length {}.".format(self._batch_size)) + raise ValueError( + "Parameter can be either a scalar or a vector of length {}.".format( + self._batch_size + ) + ) else: return broadcast_p @@ -203,39 +225,39 @@ def reset(self, **kwargs): cutoff_dim (int): new Fock space cutoff dimension to use. batch_size (None, int): None means no batching. An integer value >= 2 sets the batch size to use. """ - if 'pure' in kwargs: - pure = kwargs['pure'] + if "pure" in kwargs: + pure = kwargs["pure"] if not isinstance(pure, bool): raise ValueError("Argument 'pure' must be either True or False") self._state_is_pure = pure - if 'num_subsystems' in kwargs: - ns = kwargs['num_subsystems'] + if "num_subsystems" in kwargs: + ns = kwargs["num_subsystems"] if not isinstance(ns, int): raise ValueError("Argument 'num_subsystems' must be a positive integer") self._num_modes = ns - if 'cutoff_dim' in kwargs: - cutoff_dim = kwargs['cutoff_dim'] + if "cutoff_dim" in kwargs: + cutoff_dim = kwargs["cutoff_dim"] if not isinstance(cutoff_dim, int) or cutoff_dim < 1: raise ValueError("Argument 'cutoff_dim' must be a positive integer") self._cutoff_dim = cutoff_dim - if 'batch_size' in kwargs: - batch_size = kwargs['batch_size'] + if "batch_size" in kwargs: + batch_size = kwargs["batch_size"] if batch_size is not None: if not isinstance(batch_size, int) or batch_size < 2: raise ValueError("Argument 'batch_size' must be either None or an integer > 1") self._batch_size = batch_size - self._batched = (batch_size is not None) + self._batched = batch_size is not None - if 'graph' in kwargs: - graph = kwargs['graph'] + if "graph" in kwargs: + graph = kwargs["graph"] if not isinstance(graph, tf.Graph): raise ValueError("Argument 'graph' must be a tf.Graph") if graph != self._graph: self._graph = graph - ops.get_prefac_tensor.cache_clear() # clear any cached tensors that may live on old graph + ops.get_prefac_tensor.cache_clear() # clear any cached tensors that may live on old graph self._state_history = [] self._cache = {} @@ -246,7 +268,9 @@ def reset(self, **kwargs): if self._num_modes == 1: vac = single_mode_vac else: - vac = ops.combine_single_modes([single_mode_vac] * self._num_modes, self._batch_size) + vac = ops.combine_single_modes( + [single_mode_vac] * self._num_modes, self._batch_size + ) vac = tf.identity(vac, name="Vacuum") self._update_state(vac) @@ -269,7 +293,9 @@ def 
prepare_fock_state(self, n, mode): if self._valid_modes(mode): with self._graph.as_default(): n = self._maybe_batch(n, convert_to_tensor=False) - fock_state = ops.fock_state(n, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched) + fock_state = ops.fock_state( + n, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched + ) self._replace_and_update(fock_state, mode) def prepare_coherent_state(self, alpha, mode): @@ -280,7 +306,9 @@ def prepare_coherent_state(self, alpha, mode): with self._graph.as_default(): alpha = tf.cast(alpha, ops.def_type) alpha = self._maybe_batch(alpha) - coherent_state = ops.coherent_state(alpha, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched) + coherent_state = ops.coherent_state( + alpha, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched + ) self._replace_and_update(coherent_state, mode) def prepare_squeezed_state(self, r, theta, mode): @@ -292,7 +320,9 @@ def prepare_squeezed_state(self, r, theta, mode): r = self._maybe_batch(r) theta = self._maybe_batch(theta) self._check_incompatible_batches(r, theta) - squeezed_state = ops.squeezed_vacuum(r, theta, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched) + squeezed_state = ops.squeezed_vacuum( + r, theta, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched + ) self._replace_and_update(squeezed_state, mode) def prepare_displaced_squeezed_state(self, alpha, r, phi, mode): @@ -305,7 +335,14 @@ def prepare_displaced_squeezed_state(self, alpha, r, phi, mode): r = self._maybe_batch(r) phi = self._maybe_batch(phi) self._check_incompatible_batches(alpha, r, phi) - displaced_squeezed = ops.displaced_squeezed(alpha, r, phi, D=self._cutoff_dim, pure=self._state_is_pure, batched=self._batched) + displaced_squeezed = ops.displaced_squeezed( + alpha, + r, + phi, + D=self._cutoff_dim, + pure=self._state_is_pure, + batched=self._batched, + ) self._replace_and_update(displaced_squeezed, mode) def prepare_multimode(self, state, modes, input_state_is_pure=False): @@ -340,14 +377,16 @@ def prepare_multimode(self, state, modes, input_state_is_pure=False): n_modes = len(modes) if input_state_is_pure: - input_is_batched = (len(state.shape) > n_modes or (len(state.shape) == 2 and state.shape[1] == self._cutoff_dim**n_modes)) + input_is_batched = len(state.shape) > n_modes or ( + len(state.shape) == 2 and state.shape[1] == self._cutoff_dim ** n_modes + ) else: input_is_batched = len(state.shape) % 2 == 1 - pure_shape = tuple([self._cutoff_dim]*n_modes) - mixed_shape = tuple([self._cutoff_dim]*(2*n_modes)) - pure_shape_as_vector = tuple([self._cutoff_dim**n_modes]) - mixed_shape_as_matrix = tuple([self._cutoff_dim**n_modes]*2) + pure_shape = tuple([self._cutoff_dim] * n_modes) + mixed_shape = tuple([self._cutoff_dim] * (2 * n_modes)) + pure_shape_as_vector = tuple([self._cutoff_dim ** n_modes]) + mixed_shape_as_matrix = tuple([self._cutoff_dim ** n_modes] * 2) if input_is_batched: pure_shape = (self._batch_size,) + pure_shape mixed_shape = (self._batch_size,) + mixed_shape @@ -383,7 +422,9 @@ def phase_shift(self, theta, mode): """ with self._graph.as_default(): theta = self._maybe_batch(theta) - new_state = ops.phase_shifter(theta, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.phase_shifter( + theta, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) def displacement(self, alpha, mode): @@ -392,7 +433,9 @@ def displacement(self, alpha, mode): 
""" with self._graph.as_default(): alpha = self._maybe_batch(alpha) - new_state = ops.displacement(alpha, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.displacement( + alpha, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) def squeeze(self, z, mode): @@ -408,7 +451,9 @@ def squeeze(self, z, mode): r = self._maybe_batch(r) theta = self._maybe_batch(theta) self._check_incompatible_batches(r, theta) - new_state = ops.squeezer(r, theta, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.squeezer( + r, theta, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) def beamsplitter(self, t, r, mode1, mode2): @@ -419,7 +464,16 @@ def beamsplitter(self, t, r, mode1, mode2): t = self._maybe_batch(t) r = self._maybe_batch(r) self._check_incompatible_batches(t, r) - new_state = ops.beamsplitter(t, r, mode1, mode2, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.beamsplitter( + t, + r, + mode1, + mode2, + self._state, + self._cutoff_dim, + self._state_is_pure, + self._batched, + ) self._update_state(new_state) def kerr_interaction(self, kappa, mode): @@ -429,7 +483,9 @@ def kerr_interaction(self, kappa, mode): with self._graph.as_default(): k = tf.cast(kappa, ops.def_type) k = self._maybe_batch(k) - new_state = ops.kerr_interaction(k, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.kerr_interaction( + k, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) def cross_kerr_interaction(self, kappa, mode1, mode2): @@ -439,7 +495,9 @@ def cross_kerr_interaction(self, kappa, mode1, mode2): with self._graph.as_default(): k = tf.cast(kappa, ops.def_type) k = self._maybe_batch(k) - new_state = ops.cross_kerr_interaction(k, mode1, mode2, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.cross_kerr_interaction( + k, mode1, mode2, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) def cubic_phase(self, gamma, mode): @@ -449,7 +507,15 @@ def cubic_phase(self, gamma, mode): with self._graph.as_default(): g = tf.cast(gamma, ops.def_type) g = self._maybe_batch(g) - new_state = ops.cubic_phase(g, mode, self._state, self._cutoff_dim, self._hbar, self._state_is_pure, self._batched) + new_state = ops.cubic_phase( + g, + mode, + self._state, + self._cutoff_dim, + self._hbar, + self._state_is_pure, + self._batched, + ) self._update_state(new_state) def loss(self, T, mode): @@ -460,9 +526,11 @@ def loss(self, T, mode): # in_modes = self._state T = tf.cast(T, ops.def_type) T = self._maybe_batch(T) - new_state = ops.loss_channel(T, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched) + new_state = ops.loss_channel( + T, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched + ) self._update_state(new_state) - self._state_is_pure = False # loss output always in mixed state representation + self._state_is_pure = False # loss output always in mixed state representation def vacuum_element(self): """Compute the fidelity with the (multi-mode) vacuum state.""" @@ -504,15 +572,19 @@ def measure_fock(self, modes, select=None, **kwargs): select = np.array(select) # check for valid 'modes' argument - if len(modes) == 0 or len(modes) > self._num_modes or len(modes) != len(set(modes)): #pylint: 
disable=len-as-condition + if ( + len(modes) == 0 or len(modes) > self._num_modes or len(modes) != len(set(modes)) + ): # pylint: disable=len-as-condition raise ValueError("Specified modes are not valid.") if np.any(modes != sorted(modes)): raise ValueError("'modes' must be sorted in increasing order.") # check for valid 'select' argument if select is not None: - if np.any(select == None): #pylint: disable=singleton-comparison - raise NotImplementedError("Post-selection lists must only contain numerical values.") + if np.any(select == None): # pylint: disable=singleton-comparison + raise NotImplementedError( + "Post-selection lists must only contain numerical values." + ) if self._batched: num_meas_modes = len(modes) # in this case, select must either be: @@ -573,7 +645,9 @@ def measure_fock(self, modes, select=None, **kwargs): for m in range(self._num_modes): if m not in modes: new_mode_idx = m - removed_ctr - reduced_state = ops.partial_trace(reduced_state, new_mode_idx, red_state_is_pure, self._batched) + reduced_state = ops.partial_trace( + reduced_state, new_mode_idx, red_state_is_pure, self._batched + ) red_state_is_pure = False removed_ctr += 1 # go from bra_A,ket_A,bra_B,ket_B,... -> bra_A,bra_B,ket_A,ket_B,... since this is what diag_part expects @@ -582,7 +656,7 @@ def measure_fock(self, modes, select=None, **kwargs): state_indices = np.arange(batch_offset + 2 * num_reduced_state_modes) batch_index = state_indices[:batch_offset] bra_indices = state_indices[batch_offset::2] - ket_indices = state_indices[batch_offset + 1::2] + ket_indices = state_indices[batch_offset + 1 :: 2] transpose_list = np.concatenate([batch_index, bra_indices, ket_indices]) reduced_state_reshuffled = tf.transpose(reduced_state, transpose_list) else: @@ -600,9 +674,11 @@ def measure_fock(self, modes, select=None, **kwargs): # sample_val is a single integer for each batch entry; # we need to convert it to the corresponding [n0,n1,n2,...] 
- meas_result = ops.unravel_index(sample_tensor, [self._cutoff_dim] * num_reduced_state_modes) + meas_result = ops.unravel_index( + sample_tensor, [self._cutoff_dim] * num_reduced_state_modes + ) if not self._batched: - meas_result = meas_result[0] # no batch index, can get rid of first axis + meas_result = meas_result[0] # no batch index, can get rid of first axis # unstack this here because that's how it should be returned meas_result = tf.unstack(meas_result, axis=-1, name="Meas_result") @@ -619,14 +695,18 @@ def measure_fock(self, modes, select=None, **kwargs): self.reset(pure=self._state_is_pure) else: # only some modes were measured: put unmeasured modes in conditional state, while reseting measured modes to vac - fock_state = tf.one_hot(tf.stack(meas_result, axis=-1), depth=self._cutoff_dim, dtype=ops.def_type) + fock_state = tf.one_hot( + tf.stack(meas_result, axis=-1), depth=self._cutoff_dim, dtype=ops.def_type + ) conditional_state = self._state for idx, mode in enumerate(modes): if self._batched: f = fock_state[:, idx] else: f = fock_state[idx] - conditional_state = ops.conditional_state(conditional_state, f, mode, self._state_is_pure, batched=self._batched) + conditional_state = ops.conditional_state( + conditional_state, f, mode, self._state_is_pure, batched=self._batched + ) if self._state_is_pure: norm = tf.norm(tf.reshape(conditional_state, [batch_size, -1]), axis=1) @@ -646,26 +726,40 @@ def measure_fock(self, modes, select=None, **kwargs): normalized_conditional_state = conditional_state / tf.reshape(norm, norm_reshape) # reset measured modes into vacuum - single_mode_vac = self._single_mode_pure_vac if self._state_is_pure else self._single_mode_mixed_vac + single_mode_vac = ( + self._single_mode_pure_vac + if self._state_is_pure + else self._single_mode_mixed_vac + ) if len(modes) == 1: meas_modes_vac = single_mode_vac else: - meas_modes_vac = ops.combine_single_modes([single_mode_vac] * len(modes), self._batched) + meas_modes_vac = ops.combine_single_modes( + [single_mode_vac] * len(modes), self._batched + ) batch_index = indices[:batch_offset] - meas_mode_indices = indices[batch_offset:batch_offset + mode_size * len(modes)] - conditional_indices = indices[batch_offset + mode_size * len(modes) : batch_offset + mode_size * self._num_modes] + meas_mode_indices = indices[batch_offset : batch_offset + mode_size * len(modes)] + conditional_indices = indices[ + batch_offset + + mode_size * len(modes) : batch_offset + + mode_size * self._num_modes + ] eqn_lhs = batch_index + meas_mode_indices + "," + batch_index + conditional_indices - eqn_rhs = '' + eqn_rhs = "" meas_ctr = 0 cond_ctr = 0 for m in range(self._num_modes): if m in modes: # use measured_indices - eqn_rhs += meas_mode_indices[mode_size * meas_ctr : mode_size * (meas_ctr + 1)] + eqn_rhs += meas_mode_indices[ + mode_size * meas_ctr : mode_size * (meas_ctr + 1) + ] meas_ctr += 1 else: # use conditional indices - eqn_rhs += conditional_indices[mode_size * cond_ctr : mode_size * (cond_ctr + 1)] + eqn_rhs += conditional_indices[ + mode_size * cond_ctr : mode_size * (cond_ctr + 1) + ] cond_ctr += 1 eqn = eqn_lhs + "->" + batch_index + eqn_rhs new_state = tf.einsum(eqn, meas_modes_vac, normalized_conditional_state) @@ -697,7 +791,7 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): if mode < 0 or mode >= self._num_modes: raise ValueError("Specified modes are not valid.") - m_omega_over_hbar = 1/self._hbar + m_omega_over_hbar = 1 / self._hbar if self._state_is_pure: mode_size = 1 else: @@ -720,11 +814,15 @@ 
def measure_homodyne(self, phi, mode, select=None, **kwargs): homodyne_sample = tf.cast(meas_result, tf.float64, name="Meas_result") else: # create reduced state on mode to be measured - reduced_state = ops.reduced_density_matrix(self._state, mode, self._state_is_pure, self._batched) + reduced_state = ops.reduced_density_matrix( + self._state, mode, self._state_is_pure, self._batched + ) # rotate to homodyne basis # pylint: disable=invalid-unary-operand-type - reduced_state = ops.phase_shifter(-phi, 0, reduced_state, self._cutoff_dim, False, self._batched) + reduced_state = ops.phase_shifter( + -phi, 0, reduced_state, self._cutoff_dim, False, self._batched + ) # create pdf for homodyne measurement # We use the following quadrature wavefunction for the Fock states: @@ -763,14 +861,26 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): self._cache["hermite_polys"] = hermite_polys number_state_indices = [k for k in product(range(self._cutoff_dim), repeat=2)] - terms = [1 / np.sqrt(2 ** n * factorial(n) * 2 ** m * factorial(m)) * hermite_polys[n] * hermite_polys[m] - for n, m in number_state_indices] - hermite_matrix = tf.scatter_nd(number_state_indices, terms, [self._cutoff_dim, self._cutoff_dim, num_bins]) - hermite_terms = tf.multiply(tf.expand_dims(reduced_state, -1), tf.expand_dims(tf.cast(hermite_matrix, ops.def_type), 0)) - rho_dist = tf.cast(tf.reduce_sum(hermite_terms, axis=[1, 2]), tf.float64) \ - * (m_omega_over_hbar / np.pi) ** 0.5 \ - * tf.exp(- x ** 2) \ - * (q_tensor[1] - q_tensor[0]) # Delta_q for normalization (only works if the bins are equally spaced) + terms = [ + 1 + / np.sqrt(2 ** n * factorial(n) * 2 ** m * factorial(m)) + * hermite_polys[n] + * hermite_polys[m] + for n, m in number_state_indices + ] + hermite_matrix = tf.scatter_nd( + number_state_indices, terms, [self._cutoff_dim, self._cutoff_dim, num_bins] + ) + hermite_terms = tf.multiply( + tf.expand_dims(reduced_state, -1), + tf.expand_dims(tf.cast(hermite_matrix, ops.def_type), 0), + ) + rho_dist = ( + tf.cast(tf.reduce_sum(hermite_terms, axis=[1, 2]), tf.float64) + * (m_omega_over_hbar / np.pi) ** 0.5 + * tf.exp(-(x ** 2)) + * (q_tensor[1] - q_tensor[0]) + ) # Delta_q for normalization (only works if the bins are equally spaced) # use tf.multinomial to sample logprobs = tf.log(rho_dist) @@ -791,15 +901,34 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): self.reset(pure=self._state_is_pure) else: # only some modes were measured: put unmeasured modes in conditional state, while reseting measured modes to vac - inf_squeezed_vac = tf.convert_to_tensor([(-0.5) ** (m // 2) * np.sqrt(factorial(m)) / factorial(m // 2) if m % 2 == 0 else 0. 
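
The rho_dist built above is, term by term, the homodyne density sum_{n,m} rho_{nm} psi_n(x) psi_m(x) with the standard harmonic-oscillator quadrature wavefunctions, multiplied by the bin width so it can feed a discrete multinomial sampler. A NumPy sketch of the same density for one mode, evaluated on a grid and checked for normalization; the 50/50 mixture of |0> and |1> is an arbitrary toy state, and hbar=2 mirrors the default that appears elsewhere in this file:

import numpy as np
from numpy.polynomial.hermite import hermval
from scipy.special import factorial

hbar = 2.0
cutoff = 5
q = np.linspace(-6, 6, 1001)
x = np.sqrt(1.0 / hbar) * q          # m*omega/hbar = 1/hbar, as in the hunk above

# toy single-mode density matrix: an equal mixture of |0> and |1>
rho = np.zeros((cutoff, cutoff), dtype=complex)
rho[0, 0] = rho[1, 1] = 0.5

def herm(n, x):
    """Physicists' Hermite polynomial H_n evaluated on the grid."""
    coeffs = np.zeros(n + 1)
    coeffs[n] = 1.0
    return hermval(x, coeffs)

pdf = np.zeros_like(q)
for n in range(cutoff):
    for m in range(cutoff):
        weight = 1.0 / np.sqrt(2.0 ** n * factorial(n) * 2.0 ** m * factorial(m))
        pdf += np.real(rho[n, m]) * weight * herm(n, x) * herm(m, x)
pdf *= np.sqrt(1.0 / (np.pi * hbar)) * np.exp(-x ** 2)

dq = q[1] - q[0]
print(np.sum(pdf) * dq)              # ~1 for a normalized state and equally spaced bins
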
for m in range(self._cutoff_dim)], - dtype=ops.def_type) + inf_squeezed_vac = tf.convert_to_tensor( + [ + (-0.5) ** (m // 2) * np.sqrt(factorial(m)) / factorial(m // 2) + if m % 2 == 0 + else 0.0 + for m in range(self._cutoff_dim) + ], + dtype=ops.def_type, + ) if self._batched: inf_squeezed_vac = tf.tile(tf.expand_dims(inf_squeezed_vac, 0), [batch_size, 1]) - displacement_size = tf.stack(tf.convert_to_tensor(meas_result * np.sqrt(m_omega_over_hbar / 2))) - quad_eigenstate = ops.displacement(displacement_size, 0, inf_squeezed_vac, self._cutoff_dim, True, self._batched) - homodyne_eigenstate = ops.phase_shifter(phi, 0, quad_eigenstate, self._cutoff_dim, True, self._batched) - - conditional_state = ops.conditional_state(self._state, homodyne_eigenstate, mode, self._state_is_pure, batched=self._batched) + displacement_size = tf.stack( + tf.convert_to_tensor(meas_result * np.sqrt(m_omega_over_hbar / 2)) + ) + quad_eigenstate = ops.displacement( + displacement_size, 0, inf_squeezed_vac, self._cutoff_dim, True, self._batched + ) + homodyne_eigenstate = ops.phase_shifter( + phi, 0, quad_eigenstate, self._cutoff_dim, True, self._batched + ) + + conditional_state = ops.conditional_state( + self._state, + homodyne_eigenstate, + mode, + self._state_is_pure, + batched=self._batched, + ) # normalize if self._state_is_pure: @@ -820,22 +949,32 @@ def measure_homodyne(self, phi, mode, select=None, **kwargs): normalized_conditional_state = conditional_state / tf.reshape(norm, norm_reshape) # reset measured modes into vacuum - meas_mode_vac = self._single_mode_pure_vac if self._state_is_pure else self._single_mode_mixed_vac + meas_mode_vac = ( + self._single_mode_pure_vac + if self._state_is_pure + else self._single_mode_mixed_vac + ) batch_index = indices[:batch_offset] meas_mode_indices = indices[batch_offset : batch_offset + mode_size] - conditional_indices = indices[batch_offset + mode_size : batch_offset + mode_size * self._num_modes] + conditional_indices = indices[ + batch_offset + mode_size : batch_offset + mode_size * self._num_modes + ] eqn_lhs = batch_index + meas_mode_indices + "," + batch_index + conditional_indices - eqn_rhs = '' + eqn_rhs = "" meas_ctr = 0 cond_ctr = 0 for m in range(self._num_modes): if m == mode: # use measured_indices - eqn_rhs += meas_mode_indices[mode_size * meas_ctr : mode_size * (meas_ctr + 1)] + eqn_rhs += meas_mode_indices[ + mode_size * meas_ctr : mode_size * (meas_ctr + 1) + ] meas_ctr += 1 else: # use conditional indices - eqn_rhs += conditional_indices[mode_size * cond_ctr : mode_size * (cond_ctr + 1)] + eqn_rhs += conditional_indices[ + mode_size * cond_ctr : mode_size * (cond_ctr + 1) + ] cond_ctr += 1 eqn = eqn_lhs + "->" + batch_index + eqn_rhs new_state = tf.einsum(eqn, meas_mode_vac, normalized_conditional_state) diff --git a/strawberryfields/backends/tfbackend/ops.py b/strawberryfields/backends/tfbackend/ops.py index b2d44e08b..f36b0bcea 100644 --- a/strawberryfields/backends/tfbackend/ops.py +++ b/strawberryfields/backends/tfbackend/ops.py @@ -35,7 +35,12 @@ import numpy as np from scipy.special import binom, factorial -from strawberryfields.backends.shared_ops import generate_bs_factors, load_bs_factors, save_bs_factors, squeeze_parity +from strawberryfields.backends.shared_ops import ( + generate_bs_factors, + load_bs_factors, + save_bs_factors, + squeeze_parity, +) def_type = tf.complex64 max_num_indices = len(indices) @@ -44,6 +49,7 @@ # Helper functions: + def _numer_safe_power(base, exponent): """gives the desired behaviour of 0**0=1""" if exponent 
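
The ops.conditional_state call used above amounts to contracting the measured mode's index with the conjugated projector and renormalizing by the outcome probability. A NumPy sketch for a two-mode pure state; the projector here is a bare Fock state rather than the displaced squeezed ket built in the hunk, and all dimensions are toy choices:

import numpy as np

D = 3
psi = np.random.randn(D, D) + 1j * np.random.randn(D, D)   # toy two-mode ket
psi /= np.linalg.norm(psi)

# project mode 1 onto |2>: contract that index with the conjugated projector
f = np.zeros(D)
f[2] = 1.0
cond = np.einsum("ab,b->a", psi, f.conj())   # unnormalized conditional ket on mode 0

prob = np.vdot(cond, cond).real              # probability of obtaining this outcome
cond /= np.sqrt(prob)
print(np.isclose(np.linalg.norm(cond), 1.0))
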
== 0: @@ -51,33 +57,40 @@ def _numer_safe_power(base, exponent): return base ** exponent + def mixed(pure_state, batched=False): """Converts the state from pure to mixed""" - #todo: In the fock backend mixing is done by ops.mix(), maybe the functions should be named identically? + # todo: In the fock backend mixing is done by ops.mix(), maybe the functions should be named identically? if not batched: - pure_state = tf.expand_dims(pure_state, 0) # add in fake batch dimension + pure_state = tf.expand_dims(pure_state, 0) # add in fake batch dimension batch_offset = 1 num_modes = len(pure_state.shape) - batch_offset max_num = (max_num_indices - batch_offset) // 2 if num_modes > max_num: - raise ValueError("Converting state from pure to mixed currently only supported for {} modes.".format(max_num)) + raise ValueError( + "Converting state from pure to mixed currently only supported for {} modes.".format( + max_num + ) + ) else: # eqn: 'abc...xyz,ABC...XYZ->aAbBcC...xXyYzZ' (lowercase belonging to 'bra' side, uppercase belonging to 'ket' side) batch_index = indices[:batch_offset] - bra_indices = indices[batch_offset: batch_offset + num_modes] + bra_indices = indices[batch_offset : batch_offset + num_modes] ket_indices = indices[batch_offset + num_modes : batch_offset + 2 * num_modes] eqn_lhs = batch_index + bra_indices + "," + batch_index + ket_indices eqn_rhs = "".join(bdx + kdx for bdx, kdx in zip(bra_indices, ket_indices)) eqn = eqn_lhs + "->" + batch_index + eqn_rhs mixed_state = tf.einsum(eqn, pure_state, tf.conj(pure_state)) if not batched: - mixed_state = tf.squeeze(mixed_state, 0) # drop fake batch dimension + mixed_state = tf.squeeze(mixed_state, 0) # drop fake batch dimension return mixed_state + def batchify_indices(idxs, batch_size): """adds batch indices to the index numbering""" return [(bdx,) + idxs[i] for bdx in range(batch_size) for i in range(len(idxs))] + def unravel_index(ind, tensor_shape): """no official tensorflow implementation for this yet; this is based on one proposed in https://github.com/tensorflow/tensorflow/issues/2075 @@ -89,6 +102,7 @@ def unravel_index(ind, tensor_shape): unraveled_coords = (ind % strides) // strides_shifted return tf.transpose(unraveled_coords) + @lru_cache() def get_prefac_tensor(D, directory, save): """Equivalent to the functionality of shared_ops the bs_factors functions from shared_ops, @@ -103,6 +117,7 @@ def get_prefac_tensor(D, directory, save): prefac = tf.expand_dims(tf.cast(prefac[:D, :D, :D, :D, :D], def_type), 0) return prefac + ################################################################### # Matrices: @@ -110,14 +125,30 @@ def get_prefac_tensor(D, directory, save): # which transforms from the input (truncated) Fock basis to the # output (truncated) Fock basis. + def squeezed_vacuum_vector(r, theta, D, batched=False, eps=1e-32): """returns the ket representing a single mode squeezed vacuum state""" if batched: batch_size = r.shape[0].value r = tf.cast(r, def_type) theta = tf.cast(theta, def_type) - c1 = tf.cast(tf.stack([tf.sqrt(1 / tf.cosh(r)) * np.sqrt(factorial(k)) / factorial(k / 2.) for k in range(0, D, 2)], axis=-1), def_type) - c2 = tf.stack([(-0.5 * tf.exp(1j * theta) * tf.cast(tf.tanh(r + eps), def_type)) ** (k / 2.) 
for k in range(0, D, 2)], axis=-1) + c1 = tf.cast( + tf.stack( + [ + tf.sqrt(1 / tf.cosh(r)) * np.sqrt(factorial(k)) / factorial(k / 2.0) + for k in range(0, D, 2) + ], + axis=-1, + ), + def_type, + ) + c2 = tf.stack( + [ + (-0.5 * tf.exp(1j * theta) * tf.cast(tf.tanh(r + eps), def_type)) ** (k / 2.0) + for k in range(0, D, 2) + ], + axis=-1, + ) even_coeffs = c1 * c2 ind = [(k,) for k in np.arange(0, D, 2)] shape = [D] @@ -127,11 +158,12 @@ def squeezed_vacuum_vector(r, theta, D, batched=False, eps=1e-32): output = tf.scatter_nd(ind, tf.reshape(even_coeffs, [-1]), shape) return output + def squeezer_matrix(r, theta, D, batched=False): """creates the single mode squeeze matrix""" r = tf.cast(r, tf.float64) if not batched: - r = tf.expand_dims(r, 0) # introduce artificial batch dimension + r = tf.expand_dims(r, 0) # introduce artificial batch dimension r = tf.reshape(r, [-1, 1, 1, 1]) theta = tf.cast(theta, def_type) theta = tf.reshape(theta, [-1, 1, 1, 1]) @@ -141,18 +173,24 @@ def squeezer_matrix(r, theta, D, batched=False): m = np.reshape(rng, [-1, 1, D, 1]) k = np.reshape(rng, [-1, 1, 1, D]) - phase = tf.exp(1j * theta * (n-m) / 2) + phase = tf.exp(1j * theta * (n - m) / 2) signs = squeeze_parity(D).reshape([1, D, 1, D]) - mask = np.logical_and((m + n) % 2 == 0, k <= np.minimum(m, n)) # kills off terms where the sum index k goes past min(m,n) - k_terms = signs * \ - tf.pow(tf.sinh(r) / 2, mask * (n + m - 2 * k) / 2) * mask / \ - tf.pow(tf.cosh(r), (n + m + 1) / 2) * \ - tf.exp(0.5 * tf.lgamma(tf.cast(m + 1, tf.float64)) + \ - 0.5 * tf.lgamma(tf.cast(n + 1, tf.float64)) - \ - tf.lgamma(tf.cast(k + 1, tf.float64)) - - tf.lgamma(tf.cast((m - k) / 2 + 1, tf.float64)) - \ - tf.lgamma(tf.cast((n - k) / 2 + 1, tf.float64)) - ) + mask = np.logical_and( + (m + n) % 2 == 0, k <= np.minimum(m, n) + ) # kills off terms where the sum index k goes past min(m,n) + k_terms = ( + signs + * tf.pow(tf.sinh(r) / 2, mask * (n + m - 2 * k) / 2) + * mask + / tf.pow(tf.cosh(r), (n + m + 1) / 2) + * tf.exp( + 0.5 * tf.lgamma(tf.cast(m + 1, tf.float64)) + + 0.5 * tf.lgamma(tf.cast(n + 1, tf.float64)) + - tf.lgamma(tf.cast(k + 1, tf.float64)) + - tf.lgamma(tf.cast((m - k) / 2 + 1, tf.float64)) + - tf.lgamma(tf.cast((n - k) / 2 + 1, tf.float64)) + ) + ) output = tf.reduce_sum(phase * tf.cast(k_terms, def_type), axis=-1) if not batched: @@ -160,6 +198,7 @@ def squeezer_matrix(r, theta, D, batched=False): output = tf.squeeze(output, 0) return output + def phase_shifter_matrix(theta, D, batched=False): """creates the single mode phase shifter matrix""" if batched: @@ -175,6 +214,7 @@ def phase_shifter_matrix(theta, D, batched=False): diag_matrix = tf.matrix_set_diag(zero_matrix, diag) return diag_matrix + def kerr_interaction_matrix(kappa, D, batched=False): """creates the single mode Kerr interaction matrix""" coeffs = [tf.exp(1j * kappa * n ** 2) for n in range(D)] @@ -183,6 +223,7 @@ def kerr_interaction_matrix(kappa, D, batched=False): output = tf.matrix_diag(coeffs) return output + def cross_kerr_interaction_matrix(kappa, D, batched=False): """creates the two mode cross-Kerr interaction matrix""" coeffs = [tf.exp(1j * kappa * n1 * n2) for n1 in range(D) for n2 in range(D)] @@ -190,11 +231,12 @@ def cross_kerr_interaction_matrix(kappa, D, batched=False): coeffs = tf.stack(coeffs, axis=1) output = tf.matrix_diag(coeffs) if batched: - output = tf.transpose(tf.reshape(output, [-1] + [D]*4), [0, 1, 3, 2, 4]) + output = tf.transpose(tf.reshape(output, [-1] + [D] * 4), [0, 1, 3, 2, 4]) else: - output = 
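
The coefficients assembled in squeezed_vacuum_vector above are the textbook Fock amplitudes of single-mode squeezed vacuum, populated only at even photon numbers. A plain NumPy version is handy for sanity-checking a truncation; r, theta and the cutoff below are arbitrary toy choices:

import numpy as np
from scipy.special import factorial

def squeezed_vacuum_ket(r, theta, D):
    """Fock amplitudes of single-mode squeezed vacuum, truncated at cutoff D."""
    ket = np.zeros(D, dtype=complex)
    for k in range(0, D, 2):  # only even photon numbers are populated
        ket[k] = (
            np.sqrt(1.0 / np.cosh(r))
            * np.sqrt(factorial(k)) / factorial(k // 2)
            * (-0.5 * np.exp(1j * theta) * np.tanh(r)) ** (k // 2)
        )
    return ket

ket = squeezed_vacuum_ket(r=0.5, theta=0.3, D=30)
print(np.sum(np.abs(ket) ** 2))  # close to 1 for modest r and a generous cutoff
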
tf.transpose(tf.reshape(output, [D]*4), [0, 2, 1, 3]) + output = tf.transpose(tf.reshape(output, [D] * 4), [0, 2, 1, 3]) return output + def cubic_phase_matrix(gamma, D, hbar, batched=False, method="self_adjoint_eig"): """creates the single mode cubic phase matrix""" a, ad = ladder_ops(D) @@ -215,17 +257,18 @@ def cubic_phase_matrix(gamma, D, hbar, batched=False, method="self_adjoint_eig") # below version works for matrices larger than 18x18, but # expm was not added until TF v1.5, while # as of TF v1.5, gradient of expm is not implemented - #elif method == "expm": + # elif method == "expm": # V = tf.linalg.expm(1j * H0) else: raise ValueError("'method' must be either 'self_adjoint_eig' or 'expm'.") return V + def loss_superop(T, D, batched=False): """creates the single mode loss channel matrix""" # indices are abcd, corresponding to action |a>cde...a...xyz' (pure state) @@ -416,30 +501,63 @@ def single_mode_gate(matrix, mode, in_modes, pure=True, batched=False): else: batch_offset = 0 batch_index = indices[:batch_offset] - left_gate_str = indices[batch_offset : batch_offset + 2] # |a> max_len: - raise NotImplementedError("The max number of supported modes for this operation is currently {}".format(max_len)) + raise NotImplementedError( + "The max number of supported modes for this operation is currently {}".format(max_len) + ) if mode < 0 or mode >= num_modes: raise ValueError("'mode' argument is not compatible with number of in_modes") else: - other_modes_indices = indices[batch_offset + 2 * mode_size : batch_offset + (1 + num_modes) * mode_size] + other_modes_indices = indices[ + batch_offset + 2 * mode_size : batch_offset + (1 + num_modes) * mode_size + ] if pure: - eqn_lhs = "{},{}{}{}{}".format(batch_index + left_gate_str, batch_index, other_modes_indices[:mode * mode_size], left_gate_str[1], other_modes_indices[mode * mode_size:]) - eqn_rhs = "".join([batch_index, other_modes_indices[:mode * mode_size], left_gate_str[0], other_modes_indices[mode * mode_size:]]) + eqn_lhs = "{},{}{}{}{}".format( + batch_index + left_gate_str, + batch_index, + other_modes_indices[: mode * mode_size], + left_gate_str[1], + other_modes_indices[mode * mode_size :], + ) + eqn_rhs = "".join( + [ + batch_index, + other_modes_indices[: mode * mode_size], + left_gate_str[0], + other_modes_indices[mode * mode_size :], + ] + ) else: - eqn_lhs = "{},{}{}{}{}{},{}".format(batch_index + left_gate_str, batch_index, other_modes_indices[:mode * mode_size], left_gate_str[1], right_gate_str[0], other_modes_indices[mode * mode_size:], batch_index + right_gate_str) - eqn_rhs = "".join([batch_index, other_modes_indices[:mode * mode_size], left_gate_str[0], right_gate_str[1], other_modes_indices[mode * mode_size:]]) + eqn_lhs = "{},{}{}{}{}{},{}".format( + batch_index + left_gate_str, + batch_index, + other_modes_indices[: mode * mode_size], + left_gate_str[1], + right_gate_str[0], + other_modes_indices[mode * mode_size :], + batch_index + right_gate_str, + ) + eqn_rhs = "".join( + [ + batch_index, + other_modes_indices[: mode * mode_size], + left_gate_str[0], + right_gate_str[1], + other_modes_indices[mode * mode_size :], + ] + ) eqn = eqn_lhs + "->" + eqn_rhs einsum_inputs = [matrix, in_modes] @@ -449,6 +567,7 @@ def single_mode_gate(matrix, mode, in_modes, pure=True, batched=False): output = tf.einsum(eqn, *einsum_inputs) return output + def two_mode_gate(matrix, mode1, mode2, in_modes, pure=True, batched=False): """basic form: 'abcd,efg...b...d...xyz->efg...a...c...xyz' (pure state) @@ -460,13 +579,13 @@ def 
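
single_mode_gate above builds an einsum string that contracts the gate's ket index with the target mode's index and leaves every other index untouched. For a fixed, small case the contraction is a one-liner; the diagonal phase-shifter matrix (phases e^{i n theta} in the Fock basis) and the toy two-mode ket below are illustrative stand-ins:

import numpy as np

D = 3
psi = np.random.randn(D, D) + 1j * np.random.randn(D, D)   # toy two-mode ket
psi /= np.linalg.norm(psi)

theta = 0.7
U = np.diag(np.exp(1j * theta * np.arange(D)))   # single-mode phase shifter in the Fock basis

# apply U to mode 1 of the pure state: 'ab,cb->ca'
psi_out = np.einsum("ab,cb->ca", U, psi)

print(np.isclose(np.linalg.norm(psi_out), 1.0))  # unitary, so the norm is unchanged
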
two_mode_gate(matrix, mode1, mode2, in_modes, pure=True, batched=False): else: batch_offset = 0 batch_index = indices[:batch_offset] - left_gate_str = indices[batch_offset : batch_offset + 4] # |a> max_len: - raise NotImplementedError("The max number of supported modes for this operation is currently {}".format(max_len)) + raise NotImplementedError( + "The max number of supported modes for this operation is currently {}".format(max_len) + ) else: min_mode = min(mode1, mode2) max_mode = max(mode1, mode2) if min_mode < 0 or max_mode >= num_modes or mode1 == mode2: raise ValueError("One or more mode numbers are incompatible") else: - other_modes_indices = indices[batch_offset + 4 * mode_size : batch_offset + 4 * mode_size + mode_size * (num_modes - 2)] + other_modes_indices = indices[ + batch_offset + + 4 * mode_size : batch_offset + + 4 * mode_size + + mode_size * (num_modes - 2) + ] # build equation if mode1 == min_mode: lhs_min_mode_indices = left_gate_str[1] @@ -504,22 +630,27 @@ def two_mode_gate(matrix, mode1, mode2, in_modes, pure=True, batched=False): lhs_max_mode_indices += right_gate_str[0] rhs_min_mode_indices += right_gate_str[3] rhs_max_mode_indices += right_gate_str[1] - eqn_lhs = "{},{}{}{}{}{}{}".format(batch_index + left_gate_str, - batch_index, - other_modes_indices[:min_mode * mode_size], - lhs_min_mode_indices, - other_modes_indices[min_mode * mode_size : (max_mode - 1) * mode_size], - lhs_max_mode_indices, - other_modes_indices[(max_mode - 1) * mode_size:]) + eqn_lhs = "{},{}{}{}{}{}{}".format( + batch_index + left_gate_str, + batch_index, + other_modes_indices[: min_mode * mode_size], + lhs_min_mode_indices, + other_modes_indices[min_mode * mode_size : (max_mode - 1) * mode_size], + lhs_max_mode_indices, + other_modes_indices[(max_mode - 1) * mode_size :], + ) if not pure: eqn_lhs += "," + batch_index + right_gate_str - eqn_rhs = "".join([batch_index, - other_modes_indices[:min_mode * mode_size], - rhs_min_mode_indices, - other_modes_indices[min_mode * mode_size: (max_mode - 1) * mode_size], - rhs_max_mode_indices, - other_modes_indices[(max_mode - 1) * mode_size:] - ]) + eqn_rhs = "".join( + [ + batch_index, + other_modes_indices[: min_mode * mode_size], + rhs_min_mode_indices, + other_modes_indices[min_mode * mode_size : (max_mode - 1) * mode_size], + rhs_max_mode_indices, + other_modes_indices[(max_mode - 1) * mode_size :], + ] + ) eqn = eqn_lhs + "->" + eqn_rhs einsum_inputs = [matrix, in_modes] if not pure: @@ -531,6 +662,7 @@ def two_mode_gate(matrix, mode1, mode2, in_modes, pure=True, batched=False): output = tf.einsum(eqn, *einsum_inputs) return output + def single_mode_superop(superop, mode, in_modes, pure=True, batched=False): """rho_out = S[rho_in] (state is always converted to mixed to apply superop) @@ -549,7 +681,9 @@ def single_mode_superop(superop, mode, in_modes, pure=True, batched=False): else: num_modes = (len(in_modes.shape) - batch_offset) // 2 if num_modes > max_len: - raise NotImplementedError("The max number of supported modes for this operation is currently {}".format(max_len)) + raise NotImplementedError( + "The max number of supported modes for this operation is currently {}".format(max_len) + ) else: if pure: in_modes = mixed(in_modes, batched) @@ -558,71 +692,100 @@ def single_mode_superop(superop, mode, in_modes, pure=True, batched=False): batch_index = indices[:batch_offset] superop_indices = indices[batch_offset : batch_offset + 4] state_indices = indices[batch_offset + 4 : batch_offset + 4 + 2 * num_modes] - left_unchanged_indices = 
state_indices[:2 * mode] + left_unchanged_indices = state_indices[: 2 * mode] right_unchanged_indices = state_indices[2 * mode : 2 * (num_modes - 1)] - eqn_lhs = ",".join([batch_index + superop_indices, batch_index + left_unchanged_indices + superop_indices[1:3] + right_unchanged_indices]) - eqn_rhs = "".join([batch_index, left_unchanged_indices + superop_indices[0] + superop_indices[3] + right_unchanged_indices]) + eqn_lhs = ",".join( + [ + batch_index + superop_indices, + batch_index + + left_unchanged_indices + + superop_indices[1:3] + + right_unchanged_indices, + ] + ) + eqn_rhs = "".join( + [ + batch_index, + left_unchanged_indices + + superop_indices[0] + + superop_indices[3] + + right_unchanged_indices, + ] + ) eqn = "->".join([eqn_lhs, eqn_rhs]) new_state = tf.einsum(eqn, superop, in_modes) return new_state + ################################################################### # Quantum Gate/Channel functions: # Primary quantum optical gates implemented as transformations on 'in_modes' + def phase_shifter(theta, mode, in_modes, D, pure=True, batched=False): """returns phase shift unitary matrix on specified input modes""" matrix = phase_shifter_matrix(theta, D, batched=batched) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output + def displacement(alpha, mode, in_modes, D, pure=True, batched=False): """returns displacement unitary matrix on specified input modes""" matrix = displacement_matrix(alpha, D, batched) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output + def squeezer(r, theta, mode, in_modes, D, pure=True, batched=False): """returns squeezer unitary matrix on specified input modes""" matrix = squeezer_matrix(r, theta, D, batched) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output + def kerr_interaction(kappa, mode, in_modes, D, pure=True, batched=False): """returns Kerr unitary matrix on specified input modes""" matrix = kerr_interaction_matrix(kappa, D, batched) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output + def cross_kerr_interaction(kappa, mode1, mode2, in_modes, D, pure=True, batched=False): """returns cross-Kerr unitary matrix on specified input modes""" matrix = cross_kerr_interaction_matrix(kappa, D, batched) output = two_mode_gate(matrix, mode1, mode2, in_modes, pure, batched) return output -def cubic_phase(gamma, mode, in_modes, D, hbar=2, pure=True, batched=False, method="self_adjoint_eig"): + +def cubic_phase( + gamma, mode, in_modes, D, hbar=2, pure=True, batched=False, method="self_adjoint_eig" +): """returns cubic phase unitary matrix on specified input modes""" matrix = cubic_phase_matrix(gamma, D, hbar, batched, method=method) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output + def beamsplitter(t, r, mode1, mode2, in_modes, D, pure=True, batched=False): """returns beamsplitter unitary matrix on specified input modes""" matrix = beamsplitter_matrix(t, r, D, batched) output = two_mode_gate(matrix, mode1, mode2, in_modes, pure, batched) return output + def loss_channel(T, mode, in_modes, D, pure=True, batched=False): """returns loss channel matrix on specified input modes""" superop = loss_superop(T, D, batched) output = single_mode_superop(superop, mode, in_modes, pure, batched) return output + ################################################################### # Other useful functions for organizing modes + def combine_single_modes(modes_list, batched=False): """Group together a list of single modes (each having 
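
loss_channel and loss_superop above apply the lossy bosonic channel as a precomputed superoperator tensor. As an independent cross-check, the same channel can be written in the textbook Kraus form; this is a generic construction under that assumption, not the backend's implementation, and the transmissivity and input state are toy values:

import numpy as np
from scipy.special import binom

def loss_kraus(T, D):
    """Kraus operators of the loss channel with transmissivity T, cutoff D."""
    kraus = []
    for k in range(D):
        K = np.zeros((D, D))
        for n in range(k, D):
            K[n - k, n] = np.sqrt(binom(n, k) * T ** (n - k) * (1 - T) ** k)
        kraus.append(K)
    return kraus

D, T = 5, 0.8
rho = np.zeros((D, D))
rho[2, 2] = 1.0                                   # input Fock state |2><2|
rho_out = sum(K @ rho @ K.T for K in loss_kraus(T, D))

print(np.isclose(np.trace(rho_out), 1.0))         # trace preserved (no truncation loss here)
print(np.real(np.diag(rho_out)))                  # binomial photon-number distribution
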
dim=1 or dim=2) into a composite mode system.""" if batched: @@ -644,7 +807,11 @@ def combine_single_modes(modes_list, batched=False): # 'a,b,c,...,x,y,z->abc...xyz' max_num = max_num_indices - batch_offset if num_modes > max_num: - raise NotImplementedError("The max number of supported modes for this operation with pure states is currently {}".format(max_num)) + raise NotImplementedError( + "The max number of supported modes for this operation with pure states is currently {}".format( + max_num + ) + ) batch_index = indices[:batch_offset] out_str = indices[batch_offset : batch_offset + num_modes] modes_str = ",".join([batch_index + idx for idx in out_str]) @@ -661,10 +828,19 @@ def combine_single_modes(modes_list, batched=False): max_num = (max_num_indices - batch_offset) // 2 batch_index = indices[:batch_offset] if num_modes > max_num: - raise NotImplementedError("The max number of supported modes for this operation with mixed states is currently {}".format(max_num)) - mode_idxs = [indices[slice(batch_offset + idx, batch_offset + idx + 2)] for idx in range(0, 2 * num_modes, 2)] # each mode gets a pair of consecutive indices + raise NotImplementedError( + "The max number of supported modes for this operation with mixed states is currently {}".format( + max_num + ) + ) + mode_idxs = [ + indices[slice(batch_offset + idx, batch_offset + idx + 2)] + for idx in range(0, 2 * num_modes, 2) + ] # each mode gets a pair of consecutive indices eqn_rhs = batch_index + "".join(mode_idxs) - eqn_idxs = [batch_index + m if dims[idx] == 2 else ",".join(m) for idx, m in enumerate(mode_idxs)] + eqn_idxs = [ + batch_index + m if dims[idx] == 2 else ",".join(m) for idx, m in enumerate(mode_idxs) + ] eqn_lhs = ",".join(eqn_idxs) eqn = eqn_lhs + "->" + eqn_rhs einsum_inputs = [] @@ -677,6 +853,7 @@ def combine_single_modes(modes_list, batched=False): combined_modes = tf.einsum(eqn, *einsum_inputs) return combined_modes + def replace_mode(replacement, mode, system, state_is_pure, batched=False): """Replace the subsystem 'mode' of 'system' with new state 'replacement'. Argument 'state_is_pure' indicates whether @@ -687,6 +864,7 @@ def replace_mode(replacement, mode, system, state_is_pure, batched=False): mode = [mode] replace_modes(replacement, mode, system, state_is_pure, batched) + def replace_modes(replacement, modes, system, system_is_pure, batched=False): """Replace the subsystem 'mode' of 'system' with new state 'replacement'. Argument 'system_is_pure' indicates whether 'system' is represented by a pure state or a density matrix. @@ -694,7 +872,7 @@ def replace_modes(replacement, modes, system, system_is_pure, batched=False): Note: expects the shape of both replacement and system to match the batched parameter Note: modes does not need to be ordered. 
""" - #todo: write a better docstring + # todo: write a better docstring if isinstance(modes, int): modes = [modes] @@ -705,7 +883,7 @@ def replace_modes(replacement, modes, system, system_is_pure, batched=False): num_modes = len(system.shape) - batch_offset if not system_is_pure: - num_modes = int(num_modes/2) + num_modes = int(num_modes / 2) replacement_is_pure = bool(len(replacement.shape) - batch_offset == len(modes)) @@ -721,7 +899,7 @@ def replace_modes(replacement, modes, system, system_is_pure, batched=False): if system_is_pure: system = mixed(system, batched) - #mix the replacement if it is pure + # mix the replacement if it is pure if replacement_is_pure: replacement = mixed(replacement, batched) @@ -733,23 +911,35 @@ def replace_modes(replacement, modes, system, system_is_pure, batched=False): # append mode and insert state (There is also insert_state(), but it seemed unnecessarily complicated to try to generalize this function, which does a lot of manual list comprehension, to the multi mode case than to write the two liner below) # todo: insert_state() could be rewritten to take full advantage of the high level functions of tf. Before doing that different implementatinos should be benchmarked to compare speed and memory requirements, as in practice these methods will be perfomance critical. if not batched: - #todo: remove the hack in the line below and enable the line with axes=0 instead, if ever we change the dependency of SF to tensorflow>=1.6 - #revised_modes = tf.tensordot(reduced_state, replacement, axes=0) - revised_modes = tf.tensordot(tf.expand_dims(reduced_state, 0), tf.expand_dims(replacement, 0), axes=[[0], [0]]) + # todo: remove the hack in the line below and enable the line with axes=0 instead, if ever we change the dependency of SF to tensorflow>=1.6 + # revised_modes = tf.tensordot(reduced_state, replacement, axes=0) + revised_modes = tf.tensordot( + tf.expand_dims(reduced_state, 0), tf.expand_dims(replacement, 0), axes=[[0], [0]] + ) else: batch_size = reduced_state.shape[0].value - #todo: remove the hack in the line below and enabled the line with axes=0 instead, if ever we change the dependency of SF to tensorflow>=1.6 - #revised_modes = tf.stack([tf.tensordot(reduced_state[b], replacement[b], axes=0) for b in range(batch_size)]) - revised_modes = tf.stack([tf.tensordot(tf.expand_dims(reduced_state[b], 0), tf.expand_dims(replacement[b], 0), axes=[[0], [0]]) for b in range(batch_size)]) + # todo: remove the hack in the line below and enabled the line with axes=0 instead, if ever we change the dependency of SF to tensorflow>=1.6 + # revised_modes = tf.stack([tf.tensordot(reduced_state[b], replacement[b], axes=0) for b in range(batch_size)]) + revised_modes = tf.stack( + [ + tf.tensordot( + tf.expand_dims(reduced_state[b], 0), + tf.expand_dims(replacement[b], 0), + axes=[[0], [0]], + ) + for b in range(batch_size) + ] + ) revised_modes_pure = False # unless the preparation was meant to go into the last modes in the standard order, we need to swap indices around - if modes != list(range(num_modes-len(modes), num_modes)): + if modes != list(range(num_modes - len(modes), num_modes)): mode_permutation = [x for x in range(num_modes) if x not in modes] + modes revised_modes = reorder_modes(revised_modes, mode_permutation, revised_modes_pure, batched) return revised_modes + def reorder_modes(state, mode_permutation, pure, batched): """Reorder the indices of states according to the given mode_permutation, needs to know whether the state is pure and/or batched.""" if 
pure: @@ -757,10 +947,12 @@ def reorder_modes(state, mode_permutation, pure, batched): index_permutation = mode_permutation else: scale = 2 - index_permutation = [scale*x+i for x in mode_permutation for i in (0, 1)] #two indices per mode if we have pure states + index_permutation = [ + scale * x + i for x in mode_permutation for i in (0, 1) + ] # two indices per mode if we have pure states if batched: - index_permutation = [0] + [i+1 for i in index_permutation] + index_permutation = [0] + [i + 1 for i in index_permutation] index_permutation = np.argsort(index_permutation) @@ -768,6 +960,7 @@ def reorder_modes(state, mode_permutation, pure, batched): return reordered_modes + def insert_state(state, system, state_is_pure, mode=None, batched=False): """ Append a new mode (at slot 'mode') to system and initialize it in 'state'. @@ -789,7 +982,7 @@ def insert_state(state, system, state_is_pure, mode=None, batched=False): if mode is None or mode >= num_modes: mode = num_modes - if len(system.shape) == 0: # pylint: disable=len-as-condition + if len(system.shape) == 0: # pylint: disable=len-as-condition # no modes in system # pylint: disable=no-else-return if len(state.shape) - batch_offset == 1: @@ -800,7 +993,9 @@ def insert_state(state, system, state_is_pure, mode=None, batched=False): elif len(state.shape) - batch_offset == 2: return state else: - raise ValueError("'state' must have dim={} or dim={}".format(1 - batch_offset, 2 - batch_offset)) + raise ValueError( + "'state' must have dim={} or dim={}".format(1 - batch_offset, 2 - batch_offset) + ) else: # modes in system if len(state.shape) == batch_offset + 1 and state_is_pure: @@ -816,15 +1011,20 @@ def insert_state(state, system, state_is_pure, mode=None, batched=False): batch_index = indices[:batch_offset] left_part = indices[batch_offset : batch_offset + mode * mode_size] - middle_part = indices[batch_offset + mode * mode_size : batch_offset + (mode + 1) * mode_size] - right_part = indices[batch_offset + (mode + 1) * mode_size : batch_offset + (num_modes + 1) * mode_size] + middle_part = indices[ + batch_offset + mode * mode_size : batch_offset + (mode + 1) * mode_size + ] + right_part = indices[ + batch_offset + (mode + 1) * mode_size : batch_offset + (num_modes + 1) * mode_size + ] eqn_lhs = batch_index + left_part + right_part + "," + batch_index + middle_part eqn_rhs = batch_index + left_part + middle_part + right_part - eqn = eqn_lhs + '->' + eqn_rhs + eqn = eqn_lhs + "->" + eqn_rhs revised_modes = tf.einsum(eqn, system, state) return revised_modes + def partial_trace(system, mode, state_is_pure, batched=False): """ Trace out subsystem 'mode' from 'system'. @@ -847,11 +1047,16 @@ def partial_trace(system, mode, state_is_pure, batched=False): # requires subsystem to be traced out to be at end # i.e., ab...klmnop...yz goes to ab...klop...yzmn indices_list = [m for m in range(batch_offset + 2 * num_modes)] - dim_list = indices_list[ : batch_offset + 2 * mode] + indices_list[batch_offset + 2 * (mode + 1):] + indices_list[batch_offset + 2 * mode: batch_offset + 2 * (mode + 1)] + dim_list = ( + indices_list[: batch_offset + 2 * mode] + + indices_list[batch_offset + 2 * (mode + 1) :] + + indices_list[batch_offset + 2 * mode : batch_offset + 2 * (mode + 1)] + ) permuted_sys = tf.transpose(system, dim_list) reduced_state = tf.trace(permuted_sys) return reduced_state + def reduced_density_matrix(system, mode, state_is_pure, batched=False): """ Trace out all subsystems except 'mode' from 'system'. 
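
reorder_modes above expands a mode permutation into an index permutation, two tensor indices per mode for mixed states, before transposing. A NumPy sketch showing that this bookkeeping really does swap the modes of a two-mode density matrix; the |0>|1> product state is a toy input chosen so the result is easy to read off:

import numpy as np

D = 2
ket0 = np.array([1.0, 0.0])                  # mode 0 in |0>
ket1 = np.array([0.0, 1.0])                  # mode 1 in |1>
psi = np.einsum("a,b->ab", ket0, ket1)
rho = np.einsum("ab,AB->aAbB", psi, psi.conj())   # interleaved (bra0, ket0, bra1, ket1)

mode_permutation = [1, 0]                    # swap the two modes
index_permutation = [2 * x + i for x in mode_permutation for i in (0, 1)]
rho_swapped = np.transpose(rho, np.argsort(index_permutation))

# after the swap, mode 0 should be |1><1|: trace out mode 1 and look at the diagonal
rho_mode0 = np.einsum("aAbb->aA", rho_swapped)
print(np.real(np.diag(rho_mode0)))           # [0., 1.]
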
@@ -866,12 +1071,13 @@ def reduced_density_matrix(system, mode, state_is_pure, batched=False): batch_offset = 1 else: batch_offset = 0 - num_modes = (num_indices - batch_offset) // 2 # always mixed + num_modes = (num_indices - batch_offset) // 2 # always mixed for m in range(num_modes): if m != mode: reduced_state = partial_trace(reduced_state, m, False, batched) return reduced_state + def conditional_state(system, projector, mode, state_is_pure, batched=False): """Compute the (unnormalized) conditional state of 'system' after applying ket 'projector' to 'mode'.""" # basic_form (pure states): abc...ijk...xyz,j-> abc...ik...xyz @@ -890,31 +1096,40 @@ def conditional_state(system, projector, mode, state_is_pure, batched=False): num_modes = (num_indices - batch_offset) // mode_size max_num = (max_num_indices - batch_offset) // num_modes if num_modes > max_num: - raise ValueError("Conditional state projection currently only supported for {} modes.".format(max_num)) + raise ValueError( + "Conditional state projection currently only supported for {} modes.".format(max_num) + ) batch_index = indices[:batch_offset] mode_indices = indices[batch_offset : batch_offset + num_modes * mode_size] projector_indices = mode_indices[:mode_size] free_mode_indices = mode_indices[mode_size : num_modes * mode_size] - state_lhs = batch_index + free_mode_indices[:mode * mode_size] + projector_indices + free_mode_indices[mode * mode_size:] + state_lhs = ( + batch_index + + free_mode_indices[: mode * mode_size] + + projector_indices + + free_mode_indices[mode * mode_size :] + ) projector_lhs = batch_index + projector_indices[0] if mode_size == 2: projector_lhs += "," + batch_index + projector_indices[1] eqn_lhs = ",".join([state_lhs, projector_lhs]) eqn_rhs = batch_index + free_mode_indices - eqn = eqn_lhs + '->' + eqn_rhs + eqn = eqn_lhs + "->" + eqn_rhs einsum_args = [system, tf.conj(projector)] if not state_is_pure: einsum_args.append(projector) cond_state = tf.einsum(eqn, *einsum_args) if not batched: - cond_state = tf.squeeze(cond_state, 0) # drop fake batch dimension + cond_state = tf.squeeze(cond_state, 0) # drop fake batch dimension return cond_state + ################################################################### # Helpful auxiliary functions for ops + def ladder_ops(D): """returns the matrix representation of the annihilation and creation operators""" ind = [(i - 1, i) for i in range(1, D)] @@ -924,23 +1139,36 @@ def ladder_ops(D): ad = tf.transpose(tf.conj(a), [1, 0]) return a, ad + def H(n, x): """Explicit expression for Hermite polynomials.""" prefactor = factorial(n) - terms = tf.reduce_sum(tf.stack([_numer_safe_power(-1, m) / (factorial(m) * factorial(n - 2 * m)) * - _numer_safe_power(2 * x, n - 2 * m) - for m in range(int(np.floor(n / 2)) + 1)], axis=0), axis=0) + terms = tf.reduce_sum( + tf.stack( + [ + _numer_safe_power(-1, m) + / (factorial(m) * factorial(n - 2 * m)) + * _numer_safe_power(2 * x, n - 2 * m) + for m in range(int(np.floor(n / 2)) + 1) + ], + axis=0, + ), + axis=0, + ) return prefactor * terms + def H_n_plus_1(H_n, H_n_m1, n, x): """Recurrent definition of Hermite polynomials.""" H_n_p1 = 2 * x * H_n - 2 * n * H_n_m1 return H_n_p1 + ################################################################### # Helper functions + def _check_for_eval(kwargs): """ Helper function to check keyword arguments for user-supplied information about how numerical evaluation should proceed, diff --git a/strawberryfields/backends/tfbackend/states.py b/strawberryfields/backends/tfbackend/states.py 
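
H and H_n_plus_1 above give two routes to the physicists' Hermite polynomials: the explicit sum and the three-term recurrence. A quick NumPy check that the two agree on a small grid (grid and orders are arbitrary toy choices):

import numpy as np
from scipy.special import factorial

def hermite_explicit(n, x):
    """H_n(x) from the explicit sum formula used in H() above."""
    return factorial(n) * sum(
        (-1) ** m / (factorial(m) * factorial(n - 2 * m)) * (2 * x) ** (n - 2 * m)
        for m in range(n // 2 + 1)
    )

x = np.linspace(-2.0, 2.0, 5)
H = [np.ones_like(x), 2 * x]                       # H_0 and H_1 seed the recurrence
for n in range(1, 6):
    H.append(2 * x * H[n] - 2 * n * H[n - 1])      # H_{n+1} = 2x H_n - 2n H_{n-1}

print(all(np.allclose(H[n], hermite_explicit(n, x)) for n in range(7)))
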
index fefade839..205aaa1b6 100644 --- a/strawberryfields/backends/tfbackend/states.py +++ b/strawberryfields/backends/tfbackend/states.py @@ -35,13 +35,26 @@ class FockStateTF(BaseFockState): eval (bool): indicates the default return behaviour for the class instance (symbolic when eval=False, numerical when eval=True) """ - def __init__(self, state_data, num_modes, pure, cutoff_dim, graph, batched=False, mode_names=None, eval=True): + def __init__( + self, + state_data, + num_modes, + pure, + cutoff_dim, + graph, + batched=False, + mode_names=None, + eval=True, + ): # pylint: disable=too-many-arguments - state_data = tf.convert_to_tensor(state_data, name="state_data") # convert everything to a Tensor so we have the option to do symbolic evaluations + state_data = tf.convert_to_tensor( + state_data, name="state_data" + ) # convert everything to a Tensor so we have the option to do symbolic evaluations super().__init__(state_data, num_modes, pure, cutoff_dim, mode_names) self._batched = batched - self._str = "". \ - format(self.num_modes, self.cutoff_dim, self._pure, self._batched, self._hbar) + self._str = "".format( + self.num_modes, self.cutoff_dim, self._pure, self._batched, self._hbar + ) self._graph = graph self._eval = eval @@ -59,7 +72,11 @@ def _run(self, t, **kwargs): """ with self._graph.as_default(): if "eval" not in kwargs: - kwargs["eval"] = self._eval # when the caller did not specify, use the default behaviour of this instance + kwargs[ + "eval" + ] = ( + self._eval + ) # when the caller did not specify, use the default behaviour of this instance evaluate, session, feed_dict, close_session = _check_for_eval(kwargs) if evaluate: @@ -90,7 +107,7 @@ def trace(self, **kwargs): with self._graph.as_default(): s = self.data if not self.batched: - s = tf.expand_dims(s, 0) # insert fake batch dimension + s = tf.expand_dims(s, 0) # insert fake batch dimension if self.is_pure: flat_state = tf.reshape(s, [-1, self.cutoff_dim ** self.num_modes]) tr = tf.reduce_sum(flat_state * tf.conj(flat_state), 1) @@ -100,7 +117,7 @@ def trace(self, **kwargs): for _ in range(self.num_modes): tr = tf.trace(tr) if not self.batched: - tr = tf.squeeze(tr, 0) # drop fake batch dimension + tr = tf.squeeze(tr, 0) # drop fake batch dimension tr = self._run(tr, **kwargs) return tr @@ -135,7 +152,7 @@ def fock_prob(self, n, **kwargs): state = self.data if not self.batched: - state = tf.expand_dims(state, 0) # add fake batch dimension + state = tf.expand_dims(state, 0) # add fake batch dimension # put batch dimension at end to match behaviour of tf.gather_nd if self.is_pure: @@ -187,7 +204,7 @@ def all_fock_probs(self, **kwargs): state = self.data if not self.batched: - state = tf.expand_dims(state, 0) # add fake batch dimension + state = tf.expand_dims(state, 0) # add fake batch dimension if self.is_pure: probs = tf.abs(state) ** 2 @@ -196,7 +213,9 @@ def all_fock_probs(self, **kwargs): rng = np.arange(1, 2 * self.num_modes + 1, 2) perm = np.concatenate([[0], rng, rng + 1]) perm_state = tf.transpose(state, perm) - probs = tf.map_fn(tf.diag_part, perm_state) # diag_part doesn't work with batches, need to use map_fn + probs = tf.map_fn( + tf.diag_part, perm_state + ) # diag_part doesn't work with batches, need to use map_fn if not self.batched: probs = tf.squeeze(probs, 0) # drop fake batch dimension @@ -227,22 +246,24 @@ def fidelity(self, other_state, mode, **kwargs): float/Tensor: the numerical value, or an unevaluated Tensor object, for the fidelity. 
""" with self.graph.as_default(): - rho = self.reduced_dm([mode]) # don't pass kwargs yet + rho = self.reduced_dm([mode]) # don't pass kwargs yet if not self.batched: - rho = tf.expand_dims(rho, 0) # add fake batch dimension + rho = tf.expand_dims(rho, 0) # add fake batch dimension if len(other_state.shape) == 1: - other_state = tf.expand_dims(other_state, 0) # add batch dimension for state + other_state = tf.expand_dims(other_state, 0) # add batch dimension for state other_state = tf.cast(other_state, def_type) - state_dm = tf.einsum('bi,bj->bij', tf.conj(other_state), other_state) + state_dm = tf.einsum("bi,bj->bij", tf.conj(other_state), other_state) flat_state_dm = tf.reshape(state_dm, [1, -1]) flat_rho = tf.reshape(rho, [-1, self.cutoff_dim ** 2]) - f = tf.real(tf.reduce_sum(flat_rho * flat_state_dm, axis=1)) # implements a batched tr(rho|s>" + indices[:self._modes] - multi_cohs_vec = np.einsum(eqn, *multi_cohs_list) # tensor product of specified coherent states - flat_multi_cohs = np.reshape(multi_cohs_vec, [1, self.cutoff_dim ** self.num_modes]) # flattened tensor product; shape is: [1, cutoff_dim * num_modes] + s = tf.expand_dims(s, 0) # introduce fake batch dimension + + coh = lambda a, dim: [ + np.exp(-0.5 * np.abs(a) ** 2) * (a) ** n / np.sqrt(factorial(n)) for n in range(dim) + ] + multi_cohs_list = [ + coh(a, self.cutoff_dim) for a in alpha_list + ] # shape is: [num_modes, cutoff_dim] + eqn = ",".join(indices[: self._modes]) + "->" + indices[: self._modes] + multi_cohs_vec = np.einsum( + eqn, *multi_cohs_list + ) # tensor product of specified coherent states + flat_multi_cohs = np.reshape( + multi_cohs_vec, [1, self.cutoff_dim ** self.num_modes] + ) # flattened tensor product; shape is: [1, cutoff_dim * num_modes] if self.is_pure: flat_state = tf.reshape(s, [-1, self.cutoff_dim ** self.num_modes]) @@ -296,12 +325,26 @@ def fidelity_coherent(self, alpha_list, **kwargs): else: batch_index = indices[0] free_indices = indices[1:] - bra_indices = free_indices[:self.num_modes] + bra_indices = free_indices[: self.num_modes] ket_indices = free_indices[self.num_modes : 2 * self.num_modes] - eqn = bra_indices + "," + batch_index + "".join(bra_indices[idx] + ket_indices[idx] for idx in range(self.num_modes)) + "," + ket_indices + "->" + batch_index - f = tf.einsum(eqn, tf.convert_to_tensor(np.conj(multi_cohs_vec), dtype=def_type), s, tf.convert_to_tensor(multi_cohs_vec, def_type)) + eqn = ( + bra_indices + + "," + + batch_index + + "".join(bra_indices[idx] + ket_indices[idx] for idx in range(self.num_modes)) + + "," + + ket_indices + + "->" + + batch_index + ) + f = tf.einsum( + eqn, + tf.convert_to_tensor(np.conj(multi_cohs_vec), dtype=def_type), + s, + tf.convert_to_tensor(multi_cohs_vec, def_type), + ) if not self.batched: - f = tf.squeeze(f, 0) # drop fake batch dimension + f = tf.squeeze(f, 0) # drop fake batch dimension f = tf.identity(f, name="fidelity_coherent") f = self._run(f, **kwargs) @@ -331,7 +374,11 @@ def fidelity_vacuum(self, **kwargs): with self._graph.as_default(): mode_size = 1 if self.is_pure else 2 if self.batched: - vac_elem = tf.real(tf.reshape(self.data, [-1, self.cutoff_dim ** (self.num_modes * mode_size)])[:, 0]) + vac_elem = tf.real( + tf.reshape(self.data, [-1, self.cutoff_dim ** (self.num_modes * mode_size)])[ + :, 0 + ] + ) else: vac_elem = tf.abs(tf.reshape(self.data, [-1])[0]) ** 2 @@ -361,7 +408,7 @@ def is_vacuum(self, tol=0.0, **kwargs): bool/Tensor: the boolean value, or an unevaluated Tensor object, for whether the state is in vacuum. 
""" with self._graph.as_default(): - fidel = self.fidelity_vacuum() # dont pass on kwargs yet + fidel = self.fidelity_vacuum() # dont pass on kwargs yet is_vac = tf.less_equal(1 - fidel, tol, name="is_vacuum") is_vac = self._run(is_vac, **kwargs) return is_vac @@ -397,8 +444,10 @@ def reduced_dm(self, modes, **kwargs): if modes != sorted(modes): raise ValueError("The specified modes cannot be duplicated.") if len(modes) > self.num_modes: - raise ValueError("The number of specified modes cannot " - "be larger than the number of subsystems.") + raise ValueError( + "The number of specified modes cannot " + "be larger than the number of subsystems." + ) reduced = self.dm(**kwargs) for m in modes: @@ -408,7 +457,7 @@ def reduced_dm(self, modes, **kwargs): s = self._run(s, **kwargs) return s - def quad_expectation(self, mode, phi=0., **kwargs): + def quad_expectation(self, mode, phi=0.0, **kwargs): r""" Compute the expectation value of the quadrature operator :math:`\hat{x}_\phi` for the reduced state on the specified mode. May be numerical or symbolic. @@ -430,36 +479,44 @@ def quad_expectation(self, mode, phi=0., **kwargs): float/Tensor: the numerical value, or an unevaluated Tensor object, for the expectation value """ with self.graph.as_default(): - rho = self.reduced_dm([mode]) # don't pass kwargs yet + rho = self.reduced_dm([mode]) # don't pass kwargs yet phi = tf.convert_to_tensor(phi) - if self.batched and len(phi.shape) == 0: #pylint: disable=len-as-condition + if self.batched and len(phi.shape) == 0: # pylint: disable=len-as-condition phi = tf.expand_dims(phi, 0) - larger_cutoff = self.cutoff_dim + 1 # start one dimension higher to avoid truncation errors + larger_cutoff = ( + self.cutoff_dim + 1 + ) # start one dimension higher to avoid truncation errors R = phase_shifter_matrix(phi, larger_cutoff, batched=self.batched) if not self.batched: - R = tf.expand_dims(R, 0) # add fake batch dimension + R = tf.expand_dims(R, 0) # add fake batch dimension a, ad = ladder_ops(larger_cutoff) - x = np.sqrt(self._hbar / 2.) 
* (a + ad) - x = tf.expand_dims(tf.cast(x, def_type), 0) # add batch dimension to x + x = np.sqrt(self._hbar / 2.0) * (a + ad) + x = tf.expand_dims(tf.cast(x, def_type), 0) # add batch dimension to x quad = tf.conj(R) @ x @ R - quad2 = (quad @ quad)[:, :self.cutoff_dim, :self.cutoff_dim] - quad = quad[:, :self.cutoff_dim, :self.cutoff_dim] # drop highest dimension; remaining array gives correct truncated x**2 + quad2 = (quad @ quad)[:, : self.cutoff_dim, : self.cutoff_dim] + quad = quad[ + :, : self.cutoff_dim, : self.cutoff_dim + ] # drop highest dimension; remaining array gives correct truncated x**2 if not self.batched: - rho = tf.expand_dims(rho, 0) # add fake batch dimension + rho = tf.expand_dims(rho, 0) # add fake batch dimension flat_rho = tf.reshape(rho, [-1, self.cutoff_dim ** 2]) flat_quad = tf.reshape(quad, [1, self.cutoff_dim ** 2]) flat_quad2 = tf.reshape(quad2, [1, self.cutoff_dim ** 2]) - e = tf.real(tf.reduce_sum(flat_rho * flat_quad, axis=1)) # implements a batched tr(rho * x) - e2 = tf.real(tf.reduce_sum(flat_rho * flat_quad2, axis=1)) # implements a batched tr(rho * x ** 2) + e = tf.real( + tf.reduce_sum(flat_rho * flat_quad, axis=1) + ) # implements a batched tr(rho * x) + e2 = tf.real( + tf.reduce_sum(flat_rho * flat_quad2, axis=1) + ) # implements a batched tr(rho * x ** 2) v = e2 - e ** 2 if not self.batched: - e = tf.squeeze(e, 0) # drop fake batch dimension + e = tf.squeeze(e, 0) # drop fake batch dimension v = tf.squeeze(v, 0) # drop fake batch dimension e = tf.identity(e, name="quad_expectation") @@ -488,23 +545,27 @@ def mean_photon(self, mode, **kwargs): tuple(float/Tensor): tuple containing the numerical value, or an unevaluated Tensor object, for the mean photon number and variance. """ with self.graph.as_default(): - rho = self.reduced_dm([mode]) # don't pass kwargs yet + rho = self.reduced_dm([mode]) # don't pass kwargs yet if not self.batched: - rho = tf.expand_dims(rho, 0) # add fake batch dimension + rho = tf.expand_dims(rho, 0) # add fake batch dimension n = np.diag(np.arange(self.cutoff_dim)) flat_n = np.reshape(n, [1, self.cutoff_dim ** 2]) flat_rho = tf.reshape(rho, [-1, self.cutoff_dim ** 2]) - nbar = tf.real(tf.reduce_sum(flat_rho * flat_n, axis=1)) # implements a batched tr(rho * n) - nbarSq = tf.real(tf.reduce_sum(flat_rho * flat_n**2, axis=1)) # implements a batched tr(rho * n^2) - var = nbarSq - nbar**2 + nbar = tf.real( + tf.reduce_sum(flat_rho * flat_n, axis=1) + ) # implements a batched tr(rho * n) + nbarSq = tf.real( + tf.reduce_sum(flat_rho * flat_n ** 2, axis=1) + ) # implements a batched tr(rho * n^2) + var = nbarSq - nbar ** 2 if not self.batched: - nbar = tf.squeeze(nbar, 0) # drop fake batch dimension - var = tf.squeeze(var, 0) # drop fake batch dimension + nbar = tf.squeeze(nbar, 0) # drop fake batch dimension + var = tf.squeeze(var, 0) # drop fake batch dimension nbar = tf.identity(nbar, name="mean_photon") var = tf.identity(var, name="mean_photon_variance") @@ -566,13 +627,17 @@ def dm(self, **kwargs): batch_index = indices[0] free_indices = indices[1:] else: - batch_index = '' + batch_index = "" free_indices = indices ket = self.data - left_str = [batch_index] + [free_indices[i] for i in range(0, 2 * self.num_modes, 2)] - right_str = [batch_index] + [free_indices[i] for i in range(1, 2 * self.num_modes, 2)] + left_str = [batch_index] + [ + free_indices[i] for i in range(0, 2 * self.num_modes, 2) + ] + right_str = [batch_index] + [ + free_indices[i] for i in range(1, 2 * self.num_modes, 2) + ] out_str = [batch_index] + 
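
quad_expectation above builds the quadrature operator from truncated ladder operators and reads off the mean and variance via flattened traces. The same calculation for phi = 0 on a coherent state, in NumPy; alpha, the cutoff and hbar = 2 are toy or conventional choices, and the generous cutoff here sidesteps the truncation error that the backend handles by padding the cutoff by one:

import numpy as np
from scipy.special import factorial

hbar, D = 2.0, 25
n = np.arange(D)

a = np.diag(np.sqrt(n[1:]), k=1)                   # annihilation operator, truncated
x = np.sqrt(hbar / 2.0) * (a + a.conj().T)         # x quadrature (phi = 0)

alpha = 0.8 + 0.3j                                 # toy coherent state
ket = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(factorial(n))
rho = np.outer(ket, ket.conj())

e = np.real(np.trace(rho @ x))
e2 = np.real(np.trace(rho @ x @ x))
print(e, e2 - e ** 2)      # ~ sqrt(2 * hbar) * Re(alpha) and ~ hbar / 2
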
[free_indices[: 2 * self.num_modes]] - einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str) + einstr = "".join(left_str + [","] + right_str + ["->"] + out_str) s = tf.einsum(einstr, ket, tf.conj(ket)) else: s = tf.identity(self.data, name="density_matrix") @@ -609,10 +674,12 @@ def wigner(self, mode, xvec, pvec): if self._eval and not self.batched: return super().wigner(mode, xvec, pvec) - raise NotImplementedError("Calculation of the Wigner function is currently " - "only supported when eval=True and batched=False") + raise NotImplementedError( + "Calculation of the Wigner function is currently " + "only supported when eval=True and batched=False" + ) - def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # pragma: no cover + def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # pragma: no cover r"""The multi-mode expectation values and variance of arbitrary 2nd order polynomials of quadrature operators. @@ -671,7 +738,11 @@ def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # pragma: no c """ with self._graph.as_default(): if "eval" not in kwargs: - kwargs["eval"] = self._eval # when the caller did not specify, use the default behaviour of this instance + kwargs[ + "eval" + ] = ( + self._eval + ) # when the caller did not specify, use the default behaviour of this instance evaluate, session, feed_dict, close_session = _check_for_eval(kwargs) if evaluate and not self.batched: @@ -680,8 +751,10 @@ def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs): # pragma: no c session.close() return super().poly_quad_expectation(A, d, k, phi, **kwargs) else: - raise NotImplementedError("Calculation of multi-mode quadratic expectation values is currently " - "only supported when batched=False and when evaluating numerically.") + raise NotImplementedError( + "Calculation of multi-mode quadratic expectation values is currently " + "only supported when batched=False and when evaluating numerically." 
+ ) @property def batched(self): diff --git a/strawberryfields/circuitdrawer.py b/strawberryfields/circuitdrawer.py index e00e890b5..9f9e45d79 100644 --- a/strawberryfields/circuitdrawer.py +++ b/strawberryfields/circuitdrawer.py @@ -84,7 +84,15 @@ CIRCUIT_BODY_TERMINATOR = "}\n" CIRCUIT_BODY_START = " {" + "\n" INIT_DOCUMENT = ( - DOCUMENT_CLASS + "\n" + EMPTY_PAGESTYLE + "\n" + QCIRCUIT_PACKAGE + "\n" + BEGIN_DOCUMENT + "\n" + CIRCUIT_START + DOCUMENT_CLASS + + "\n" + + EMPTY_PAGESTYLE + + "\n" + + QCIRCUIT_PACKAGE + + "\n" + + BEGIN_DOCUMENT + + "\n" + + CIRCUIT_START ) PIPE = "|" @@ -546,9 +554,7 @@ def _end_wire(self): def _apply_spacing(self): """Applies wire and operator visual spacing.""" if self._column_spacing is not None: - self._document += Circuit._pad_with_spaces( - COLUMN_SPACING.format(self._column_spacing) - ) + self._document += Circuit._pad_with_spaces(COLUMN_SPACING.format(self._column_spacing)) if self._row_spacing is not None: self._document += Circuit._pad_with_spaces(ROW_SPACING.format(self._row_spacing)) diff --git a/strawberryfields/circuitspecs/X8.py b/strawberryfields/circuitspecs/X8.py index 3a3c6adb0..cc235425a 100644 --- a/strawberryfields/circuitspecs/X8.py +++ b/strawberryfields/circuitspecs/X8.py @@ -78,6 +78,7 @@ class X8Specs(CircuitSpecs): """Circuit specifications for the X8 class of circuits.""" + short_name = "X8" modes = 8 remote = True @@ -89,7 +90,7 @@ class X8Specs(CircuitSpecs): primitives = {"S2gate", "MeasureFock", "Rgate", "BSgate", "MZgate"} decompositions = { "Interferometer": {"mesh": "rectangular_symmetric", "drop_identity": False}, - "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False} + "BipartiteGraphEmbed": {"mesh": "rectangular_symmetric", "drop_identity": False}, } circuit = X8_CIRCUIT.format(target=short_name) @@ -164,7 +165,9 @@ def compile(self, seq, registers): # Compile the unitary: combine and then decompose all unitaries # ------------------------------------------------------------- - A, B, C = group_operations(seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate, ops.MZgate))) + A, B, C = group_operations( + seq, lambda x: isinstance(x, (ops.Rgate, ops.BSgate, ops.MZgate)) + ) # begin unitary lists for mode [0, 1, 2, 3] and modes [4, 5, 6, 7] with # two identity matrices. 
This is because multi_dot requires @@ -242,5 +245,6 @@ def compile(self, seq, registers): class X8_01(X8Specs): """Circuit specifications for the X8_01 class of circuits.""" + short_name = "X8_01" circuit = X8_CIRCUIT.format(target=short_name) diff --git a/strawberryfields/circuitspecs/__init__.py b/strawberryfields/circuitspecs/__init__.py index aff57c2c4..e1c868770 100644 --- a/strawberryfields/circuitspecs/__init__.py +++ b/strawberryfields/circuitspecs/__init__.py @@ -45,7 +45,18 @@ from .tensorflow import TFSpecs from .gaussian_unitary import GaussianUnitary -specs = (X8Specs, X8_01, X12Specs, X12_01, X12_02, FockSpecs, GaussianSpecs, GBSSpecs, TFSpecs, GaussianUnitary) +specs = ( + X8Specs, + X8_01, + X12Specs, + X12_01, + X12_02, + FockSpecs, + GaussianSpecs, + GBSSpecs, + TFSpecs, + GaussianUnitary, +) circuit_db = {c.short_name: c for c in specs} """dict[str, ~strawberryfields.circuitspecs.CircuitSpecs]: Map from circuit diff --git a/strawberryfields/circuitspecs/circuit_specs.py b/strawberryfields/circuitspecs/circuit_specs.py index 151f343c3..78acd0960 100644 --- a/strawberryfields/circuitspecs/circuit_specs.py +++ b/strawberryfields/circuitspecs/circuit_specs.py @@ -42,7 +42,7 @@ class CircuitSpecs(abc.ABC): This information is used e.g., in :meth:`.Program.compile` for validation and compilation. """ - short_name = '' + short_name = "" """str: short name of the circuit class""" @property @@ -198,11 +198,11 @@ def compile(self, seq, registers): # compared, rather than using Command objects. mapping = {i: n.op.__class__.__name__ for i, n in enumerate(DAG.nodes())} circuit = nx.convert_node_labels_to_integers(DAG) - nx.set_node_attributes(circuit, mapping, name='name') + nx.set_node_attributes(circuit, mapping, name="name") def node_match(n1, n2): """Returns True if both nodes have the same name""" - return n1['name'] == n2['name'] + return n1["name"] == n2["name"] # check if topology matches if not nx.is_isomorphic(circuit, self.graph, node_match): @@ -252,13 +252,17 @@ class attributes to determine whether a command should be decomposed. op_name = cmd.op.__class__.__name__ if op_name in self.decompositions: # target can implement this op decomposed - if hasattr(cmd.op, 'decomp') and not cmd.op.decomp: + if hasattr(cmd.op, "decomp") and not cmd.op.decomp: # user has requested application of the op as a primitive if op_name in self.primitives: compiled.append(cmd) continue else: - raise pu.CircuitError("The operation {} is not a primitive for the target '{}'".format(cmd.op.__class__.__name__, self.short_name)) + raise pu.CircuitError( + "The operation {} is not a primitive for the target '{}'".format( + cmd.op.__class__.__name__, self.short_name + ) + ) try: kwargs = self.decompositions[op_name] temp = cmd.op.decompose(cmd.reg, **kwargs) @@ -275,6 +279,10 @@ class attributes to determine whether a command should be decomposed. 
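
The template check in CircuitSpecs.compile above relabels the program DAG with integer nodes, attaches the operation name to each node, and then asks networkx for an isomorphism that also matches names. A self-contained toy version of that check; the two-gate graphs below are invented for illustration and are not a real device template:

import networkx as nx

def node_match(n1, n2):
    """Nodes match when they carry the same operation name."""
    return n1["name"] == n2["name"]

g1 = nx.DiGraph()
g1.add_nodes_from([(0, {"name": "S2gate"}), (1, {"name": "MeasureFock"})])
g1.add_edge(0, 1)

g2 = nx.DiGraph()
g2.add_nodes_from([(7, {"name": "S2gate"}), (9, {"name": "MeasureFock"})])
g2.add_edge(7, 9)

# isomorphic as labelled graphs: same topology and the same gate names,
# regardless of the integer labels chosen for the nodes
print(nx.is_isomorphic(g1, g2, node_match=node_match))   # True
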
compiled.append(cmd) else: - raise pu.CircuitError("The operation {} cannot be used with the target '{}'.".format(cmd.op.__class__.__name__, self.short_name)) + raise pu.CircuitError( + "The operation {} cannot be used with the target '{}'.".format( + cmd.op.__class__.__name__, self.short_name + ) + ) return compiled diff --git a/strawberryfields/circuitspecs/fock.py b/strawberryfields/circuitspecs/fock.py index 583224f01..4ef4bf907 100644 --- a/strawberryfields/circuitspecs/fock.py +++ b/strawberryfields/circuitspecs/fock.py @@ -18,7 +18,7 @@ class FockSpecs(CircuitSpecs): """Circuit specifications for the Fock backend.""" - short_name = 'fock' + short_name = "fock" modes = None local = True remote = True diff --git a/strawberryfields/circuitspecs/gaussian.py b/strawberryfields/circuitspecs/gaussian.py index c306f501b..1941eb92b 100644 --- a/strawberryfields/circuitspecs/gaussian.py +++ b/strawberryfields/circuitspecs/gaussian.py @@ -18,7 +18,7 @@ class GaussianSpecs(CircuitSpecs): """Circuit specifications for the Gaussian backend.""" - short_name = 'gaussian' + short_name = "gaussian" modes = None local = True remote = True diff --git a/strawberryfields/circuitspecs/gaussian_unitary.py b/strawberryfields/circuitspecs/gaussian_unitary.py index ca1d77140..e288b31b0 100644 --- a/strawberryfields/circuitspecs/gaussian_unitary.py +++ b/strawberryfields/circuitspecs/gaussian_unitary.py @@ -17,9 +17,18 @@ from strawberryfields.program_utils import Command from strawberryfields import ops from strawberryfields.parameters import par_evaluate -from thewalrus.symplectic import expand_vector, expand, rotation, squeezing, two_mode_squeezing, interferometer, beam_splitter +from thewalrus.symplectic import ( + expand_vector, + expand, + rotation, + squeezing, + two_mode_squeezing, + interferometer, + beam_splitter, +) from .circuit_specs import CircuitSpecs + class GaussianUnitary(CircuitSpecs): """Compiler to arrange a Gaussian quantum circuit into the canonical Symplectic form. @@ -77,8 +86,8 @@ class GaussianUnitary(CircuitSpecs): # multi mode gates "BSgate", "S2gate", - "Interferometer", # Note that interferometer is accepted as a primitive - "GaussianTransform", # Note that GaussianTransform is accepted as a primitive + "Interferometer", # Note that interferometer is accepted as a primitive + "GaussianTransform", # Note that GaussianTransform is accepted as a primitive } decompositions = { @@ -93,7 +102,7 @@ class GaussianUnitary(CircuitSpecs): "Zgate": {}, "Fouriergate": {}, } - #pylint: disable=too-many-branches + # pylint: disable=too-many-branches def compile(self, seq, registers): """Try to arrange a quantum circuit into the canonical Symplectic form. 
@@ -152,11 +161,13 @@ def compile(self, seq, registers): interferometer(params[0]), [dict_indices[mode] for mode in modes], nmodes ) elif name == "GaussianTransform": + S = expand(params[0], [dict_indices[mode] for mode in modes], nmodes) + elif name == "BSgate": S = expand( - params[0], [dict_indices[mode] for mode in modes], nmodes + beam_splitter(params[0], params[1]), + [dict_indices[modes[0]], dict_indices[modes[1]]], + nmodes, ) - elif name == "BSgate": - S = expand(beam_splitter(params[0], params[1]), [dict_indices[modes[0]], dict_indices[modes[1]]], nmodes) Snet = S @ Snet rnet = S @ rnet diff --git a/strawberryfields/circuitspecs/gbs.py b/strawberryfields/circuitspecs/gbs.py index f75491a67..e58c29e5d 100644 --- a/strawberryfields/circuitspecs/gbs.py +++ b/strawberryfields/circuitspecs/gbs.py @@ -22,7 +22,7 @@ class GBSSpecs(GaussianSpecs): """Circuit specifications for the general GBS class of circuits.""" - short_name = 'gbs' + short_name = "gbs" primitives = { # meta operations "All", @@ -48,14 +48,9 @@ class GBSSpecs(GaussianSpecs): "Sgate", "Rgate", "Fouriergate", - "BSgate" + "BSgate", } - decompositions = { - "Xgate": {}, - "Zgate": {}, - "Fouriergate": {}, - "S2gate":{} -, } + decompositions = {"Xgate": {}, "Zgate": {}, "Fouriergate": {}, "S2gate": {}} def compile(self, seq, registers): """Try to arrange a quantum circuit into a form suitable for Gaussian boson sampling. @@ -79,25 +74,25 @@ def compile(self, seq, registers): # C should be empty if C: - raise CircuitError('Operations following the Fock measurements.') + raise CircuitError("Operations following the Fock measurements.") # A should only contain Gaussian operations # (but this is already guaranteed by group_operations() and our primitive set) # without Fock measurements GBS is pointless if not B: - raise CircuitError('GBS circuits must contain Fock measurements.') + raise CircuitError("GBS circuits must contain Fock measurements.") # there should be only Fock measurements in B measured = set() for cmd in B: if not isinstance(cmd.op, ops.MeasureFock): - raise CircuitError('The Fock measurements are not consecutive.') + raise CircuitError("The Fock measurements are not consecutive.") # combine the Fock measurements temp = set(cmd.reg) if measured & temp: - raise CircuitError('Measuring the same mode more than once.') + raise CircuitError("Measuring the same mode more than once.") measured |= temp # replace B with a single Fock measurement diff --git a/strawberryfields/circuitspecs/tensorflow.py b/strawberryfields/circuitspecs/tensorflow.py index 46942710d..07bc46e11 100644 --- a/strawberryfields/circuitspecs/tensorflow.py +++ b/strawberryfields/circuitspecs/tensorflow.py @@ -18,7 +18,7 @@ class TFSpecs(CircuitSpecs): """Circuit specifications for the TensorFlow backend.""" - short_name = 'tf' + short_name = "tf" modes = None local = True remote = False diff --git a/strawberryfields/cli/__init__.py b/strawberryfields/cli/__init__.py index 5ae6ed8bb..f0d02c524 100755 --- a/strawberryfields/cli/__init__.py +++ b/strawberryfields/cli/__init__.py @@ -20,8 +20,7 @@ import sys from strawberryfields.api import Connection -from strawberryfields.configuration import (ConfigurationError, create_config, - store_account) +from strawberryfields.configuration import ConfigurationError, create_config, store_account from strawberryfields.engine import RemoteEngine from strawberryfields.io import load @@ -60,6 +59,7 @@ def main(): else: parser.print_help() + def create_parser(): """Creates a parser to process the commands and 
arguments passed to the command line interface. @@ -69,7 +69,7 @@ def create_parser(): options """ parser = argparse.ArgumentParser( - description="See below for available options and commands for working with the Xanadu cloud platform.", + description="See below for available options and commands for working with the Xanadu cloud platform." ) # Setting a title for the general options (requires setting a private @@ -97,13 +97,16 @@ def create_parser(): help="Configure Strawberry Fields with your Xanadu cloud platform API token.", ) configure_parser.add_argument( - "--local", "-l", action="store_true", help="Create a local configuration file in the current directory." + "--local", + "-l", + action="store_true", + help="Create a local configuration file in the current directory.", ) # Adding the input subparser run_parser = subparsers.add_parser("run", help="Run a blackbird script.") run_parser.add_argument( - "input", type=str, help="The filename or path to the blackbird script to run.", + "input", type=str, help="The filename or path to the blackbird script to run." ) run_parser.set_defaults(func=run_blackbird_script) run_parser.add_argument( @@ -140,6 +143,7 @@ def configure(args): else: store_account(**kwargs) + def ping(): """Tests the connection to the remote backend.""" if Connection().ping(): @@ -147,6 +151,7 @@ def ping(): else: sys.stdout.write("There was a problem when authenticating to the platform!\n") + def configuration_wizard(): r"""Provides an interactive selection wizard on the command line to configure every option for the API connection. @@ -164,8 +169,8 @@ def configuration_wizard(): ssl_default = "y" if default_config["use_ssl"] else "n" port_default = default_config["port"] - authentication_token = ( - input("Please enter the authentication token to use when connecting: [] ") + authentication_token = input( + "Please enter the authentication token to use when connecting: [] " ) if not authentication_token: @@ -173,13 +178,22 @@ def configuration_wizard(): sys.exit() hostname = ( - input("Please enter the hostname of the server to connect to: [{}] ".format(hostname_default)) or hostname_default + input( + "Please enter the hostname of the server to connect to: [{}] ".format(hostname_default) + ) + or hostname_default ) - ssl_input = input("Should the client attempt to connect over SSL? [{}] ".format(ssl_default)) or ssl_default + ssl_input = ( + input("Should the client attempt to connect over SSL? [{}] ".format(ssl_default)) + or ssl_default + ) use_ssl = ssl_input.upper() == "Y" - port = input("Please enter the port number to connect with: [{}] ".format(port_default)) or port_default + port = ( + input("Please enter the port number to connect with: [{}] ".format(port_default)) + or port_default + ) kwargs = { "authentication_token": authentication_token, @@ -189,6 +203,7 @@ def configuration_wizard(): } return kwargs + def run_blackbird_script(args): """Run a blackbird script. @@ -214,9 +229,12 @@ def run_blackbird_script(args): if result and result.samples is not None: write_script_results(result.samples, output_file=args.output) else: - sys.stdout.write("Ooops! Something went wrong with obtaining the results. Please check the Blackbird script specified and the connection to the remote engine.") + sys.stdout.write( + "Ooops! Something went wrong with obtaining the results. Please check the Blackbird script specified and the connection to the remote engine." 
+ ) sys.exit() + def write_script_results(samples, output_file=None): """Write the results of the script either to a file or to the standard output. diff --git a/strawberryfields/decompositions.py b/strawberryfields/decompositions.py index 12a6ede0e..2c44f3112 100644 --- a/strawberryfields/decompositions.py +++ b/strawberryfields/decompositions.py @@ -44,7 +44,7 @@ def takagi(N, tol=1e-13, rounding=13): (n, m) = N.shape if n != m: raise ValueError("The input matrix must be square") - if np.linalg.norm(N-np.transpose(N)) >= tol: + if np.linalg.norm(N - np.transpose(N)) >= tol: raise ValueError("The input matrix is not symmetric") v, l, ws = np.linalg.svd(N) @@ -61,7 +61,7 @@ def takagi(N, tol=1e-13, rounding=13): for k in result: for ind, j in enumerate(k): # pylint: disable=unused-variable k[ind] = kk - kk = kk+1 + kk = kk + 1 # Generate the lists with the degenerate column subspaces vas = [] @@ -214,9 +214,9 @@ def bipartite_graph_embed(A, mean_photon_per_mode=1.0, rtol=1e-05, atol=1e-08): def T(m, n, theta, phi, nmax): r"""The Clements T matrix from Eq. 1 of the paper""" mat = np.identity(nmax, dtype=np.complex128) - mat[m, m] = np.exp(1j*phi)*np.cos(theta) + mat[m, m] = np.exp(1j * phi) * np.cos(theta) mat[m, n] = -np.sin(theta) - mat[n, m] = np.exp(1j*phi)*np.sin(theta) + mat[n, m] = np.exp(1j * phi) * np.sin(theta) mat[n, n] = np.cos(theta) return mat @@ -236,15 +236,15 @@ def nullTi(m, n, U): if U[m, n] == 0: thetar = 0 phir = 0 - elif U[m, n+1] == 0: - thetar = np.pi/2 + elif U[m, n + 1] == 0: + thetar = np.pi / 2 phir = 0 else: - r = U[m, n] / U[m, n+1] + r = U[m, n] / U[m, n + 1] thetar = np.arctan(np.abs(r)) phir = np.angle(r) - return [n, n+1, thetar, phir, nmax] + return [n, n + 1, thetar, phir, nmax] def nullT(n, m, U): @@ -257,15 +257,15 @@ def nullT(n, m, U): if U[n, m] == 0: thetar = 0 phir = 0 - elif U[n-1, m] == 0: - thetar = np.pi/2 + elif U[n - 1, m] == 0: + thetar = np.pi / 2 phir = 0 else: - r = -U[n, m] / U[n-1, m] + r = -U[n, m] / U[n - 1, m] thetar = np.arctan(np.abs(r)) phir = np.angle(r) - return [n-1, n, thetar, phir, nmax] + return [n - 1, n, thetar, phir, nmax] def rectangular(V, tol=1e-11): @@ -304,14 +304,14 @@ def rectangular(V, tol=1e-11): tilist = [] tlist = [] - for k, i in enumerate(range(nsize-2, -1, -1)): + for k, i in enumerate(range(nsize - 2, -1, -1)): if k % 2 == 0: - for j in reversed(range(nsize-1-i)): - tilist.append(nullTi(i+j+1, j, localV)) + for j in reversed(range(nsize - 1 - i)): + tilist.append(nullTi(i + j + 1, j, localV)) localV = localV @ Ti(*tilist[-1]) else: - for j in range(nsize-1-i): - tlist.append(nullT(i+j+1, j, localV)) + for j in range(nsize - 1 - i): + tlist.append(nullT(i + j + 1, j, localV)) localV = T(*tlist[-1]) @ localV return tilist, np.diag(localV), tlist @@ -354,7 +354,7 @@ def rectangular_phase_end(V, tol=1e-11): new_beta = beta new_i = [i[0], i[1], new_theta, new_phi, i[4]] - new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta) + new_diags[em], new_diags[en] = np.exp(1j * new_alpha), np.exp(1j * new_beta) new_tlist = new_tlist + [new_i] @@ -444,12 +444,12 @@ def rectangular_symmetric(V, tol=1e-11): internal_phase = (np.pi + 2.0 * theta) % (2 * np.pi) # repeat modulo operations , otherwise the input unitary # numpy.identity(20) yields an external_phase of exactly 2 * pi - external_phase %= (2 * np.pi) - internal_phase %= (2 * np.pi) + external_phase %= 2 * np.pi + internal_phase %= 2 * np.pi new_alpha = beta - theta + np.pi - new_beta = 0*np.pi - theta + beta + new_beta = 0 * np.pi - theta + beta 
new_i = [i[0], i[1], internal_phase, external_phase, i[4]] - new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta) + new_diags[em], new_diags[en] = np.exp(1j * new_alpha), np.exp(1j * new_beta) new_tlist = new_tlist + [new_i] new_diags = diags * new_diags @@ -482,9 +482,9 @@ def triangular(V, tol=1e-11): raise ValueError("The input matrix is not unitary") tlist = [] - for i in range(nsize-2, -1, -1): - for j in range(i+1): - tlist.append(nullT(nsize-j-1, nsize-i-2, localV)) + for i in range(nsize - 2, -1, -1): + for j in range(i + 1): + tlist.append(nullT(nsize - j - 1, nsize - i - 2, localV)) localV = T(*tlist[-1]) @ localV return list(reversed(tlist)), np.diag(localV), None @@ -516,16 +516,15 @@ def williamson(V, tol=1e-11): if n != m: raise ValueError("The input matrix is not square") - diffn = np.linalg.norm(V-np.transpose(V)) + diffn = np.linalg.norm(V - np.transpose(V)) if diffn >= tol: raise ValueError("The input matrix is not symmetric") if n % 2 != 0: - raise ValueError( - "The input matrix must have an even number of rows/columns") + raise ValueError("The input matrix must have an even number of rows/columns") - n = n//2 + n = n // 2 omega = sympmat(n) rotmat = changebasis(n) vals = np.linalg.eigvalsh(V) @@ -547,7 +546,7 @@ def williamson(V, tol=1e-11): # go to the ordering x_1, ..., x_n, p_1, ... , p_n for i in range(n): - if s1[2*i, 2*i+1] > 0: + if s1[2 * i, 2 * i + 1] > 0: seq.append(I) else: seq.append(X) @@ -555,10 +554,9 @@ def williamson(V, tol=1e-11): p = block_diag(*seq) Kt = K @ p s1t = p @ s1 @ p - dd = np.transpose(rotmat) @ s1t @rotmat + dd = np.transpose(rotmat) @ s1t @ rotmat Ktt = Kt @ rotmat - Db = np.diag([1/dd[i, i+n] for i in range(n)] + [1/dd[i, i+n] - for i in range(n)]) + Db = np.diag([1 / dd[i, i + n] for i in range(n)] + [1 / dd[i, i + n] for i in range(n)]) S = Mm12 @ Ktt @ sqrtm(Db) return Db, np.linalg.inv(S).T @@ -603,24 +601,23 @@ def bloch_messiah(S, tol=1e-10, rounding=9): if n != m: raise ValueError("The input matrix is not square") if n % 2 != 0: - raise ValueError( - "The input matrix must have an even number of rows/columns") + raise ValueError("The input matrix must have an even number of rows/columns") - n = n//2 + n = n // 2 omega = sympmat(n) if np.linalg.norm(np.transpose(S) @ omega @ S - omega) >= tol: raise ValueError("The input matrix is not symplectic") - if np.linalg.norm(np.transpose(S) @ S - np.eye(2*n)) >= tol: + if np.linalg.norm(np.transpose(S) @ S - np.eye(2 * n)) >= tol: - u, sigma = polar(S, side='left') + u, sigma = polar(S, side="left") ss, uss = takagi(sigma, tol=tol, rounding=rounding) # Apply a permutation matrix so that the squeezers appear in the order # s_1,...,s_n, 1/s_1,...1/s_n - perm = np.array(list(range(0, n)) + list(reversed(range(n, 2*n)))) + perm = np.array(list(range(0, n)) + list(reversed(range(n, 2 * n)))) - pmat = np.identity(2*n)[perm, :] + pmat = np.identity(2 * n)[perm, :] ut = uss @ pmat # Apply a second permutation matrix to permute s @@ -641,7 +638,7 @@ def bloch_messiah(S, tol=1e-10, rounding=9): u_list, v_list = [], [] for start_i, stop_i in zip(start_is, stop_is): - x = qomega[start_i: stop_i, n + start_i: n + stop_i].real + x = qomega[start_i:stop_i, n + start_i : n + stop_i].real u_svd, _s_svd, v_svd = np.linalg.svd(x) u_list = u_list + [u_svd] v_list = v_list + [v_svd.T] @@ -654,8 +651,8 @@ def bloch_messiah(S, tol=1e-10, rounding=9): else: ut1 = S - st1 = np.eye(2*n) - v1 = np.eye(2*n) + st1 = np.eye(2 * n) + v1 = np.eye(2 * n) return ut1.real, st1.real, v1.real @@ -683,10 
+680,10 @@ def covmat_to_hamil(V, tol=1e-10): # pragma: no cover (n, m) = V.shape if n != m: raise ValueError("Input matrix must be square") - if np.linalg.norm(V-np.transpose(V)) >= tol: + if np.linalg.norm(V - np.transpose(V)) >= tol: raise ValueError("The input matrix is not symmetric") - n = n//2 + n = n // 2 omega = sympmat(n) vals = np.linalg.eigvalsh(V) @@ -694,9 +691,9 @@ def covmat_to_hamil(V, tol=1e-10): # pragma: no cover if val <= 0: raise ValueError("Input matrix is not positive definite") - W = 1j*V @ omega + W = 1j * V @ omega l, v = np.linalg.eig(W) - H = (1j * omega @ (v @ np.diag(np.arctanh(1.0/l.real)) @ np.linalg.inv(v))).real + H = (1j * omega @ (v @ np.diag(np.arctanh(1.0 / l.real)) @ np.linalg.inv(v))).real return H @@ -719,7 +716,7 @@ def hamil_to_covmat(H, tol=1e-10): # pragma: no cover (n, m) = H.shape if n != m: raise ValueError("Input matrix must be square") - if np.linalg.norm(H-np.transpose(H)) >= tol: + if np.linalg.norm(H - np.transpose(H)) >= tol: raise ValueError("The input matrix is not symmetric") vals = np.linalg.eigvalsh(H) @@ -727,10 +724,10 @@ def hamil_to_covmat(H, tol=1e-10): # pragma: no cover if val <= 0: raise ValueError("Input matrix is not positive definite") - n = n//2 + n = n // 2 omega = sympmat(n) - Wi = 1j*omega @ H + Wi = 1j * omega @ H l, v = np.linalg.eig(Wi) - V = (1j * (v @ np.diag(1.0/np.tanh(l.real)) @ np.linalg.inv(v)) @ omega).real + V = (1j * (v @ np.diag(1.0 / np.tanh(l.real)) @ np.linalg.inv(v)) @ omega).real return V diff --git a/strawberryfields/engine.py b/strawberryfields/engine.py index 7f38daa97..f46f17867 100644 --- a/strawberryfields/engine.py +++ b/strawberryfields/engine.py @@ -280,9 +280,7 @@ def _broadcast_nones(val, dim): self._run_program(p, **kwargs) shots = kwargs.get("shots", 1) - self.samples = [ - _broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs) - ] + self.samples = [_broadcast_nones(p.reg_refs[k].val, shots) for k in sorted(p.reg_refs)] self.run_progs.append(p) prev = p @@ -488,7 +486,9 @@ class RemoteEngine: VALID_TARGETS = ("X8_01", "X12_01", "X12_02") DEFAULT_TARGETS = {"X8": "X8_01", "X12": "X12_01"} - def __init__(self, target: str, connection: Connection = Connection(), backend_options: dict = None): + def __init__( + self, target: str, connection: Connection = Connection(), backend_options: dict = None + ): self._target = self.DEFAULT_TARGETS.get(target, target) if self._target not in self.VALID_TARGETS: diff --git a/strawberryfields/io.py b/strawberryfields/io.py index d018728a2..fdb0b1689 100644 --- a/strawberryfields/io.py +++ b/strawberryfields/io.py @@ -26,8 +26,7 @@ # for automodapi, do not include the classes that should appear under the top-level strawberryfields namespace -__all__ = ['to_blackbird', 'to_program', 'loads'] - +__all__ = ["to_blackbird", "to_program", "loads"] def to_blackbird(prog, version="1.0"): @@ -97,7 +96,7 @@ def to_program(bb): # to initialize the Program object with. 
raise ValueError("Blackbird program contains no quantum operations!") - prog = sfp.Program(max(bb.modes)+1, name=bb.name) + prog = sfp.Program(max(bb.modes) + 1, name=bb.name) # append the quantum operations with prog.context as q: @@ -116,20 +115,20 @@ def to_program(bb): # create the list of regrefs regrefs = [q[i] for i in op["modes"]] - if 'args' in op: + if "args" in op: # the gate has arguments - args = op['args'] - kwargs = op['kwargs'] + args = op["args"] + kwargs = op["kwargs"] # Convert symbolic expressions in args/kwargs containing measured and free parameters to # symbolic expressions containing the corresponding MeasuredParameter and FreeParameter instances. args = sfpar.par_convert(args, prog) vals = sfpar.par_convert(kwargs.values(), prog) kwargs = dict(zip(kwargs.keys(), vals)) - gate(*args, **kwargs) | regrefs #pylint:disable=expression-not-assigned + gate(*args, **kwargs) | regrefs # pylint:disable=expression-not-assigned else: # the gate has no arguments - gate | regrefs #pylint:disable=expression-not-assigned,pointless-statement + gate | regrefs # pylint:disable=expression-not-assigned,pointless-statement # compile the program if a compile target is given targ = bb.target @@ -214,6 +213,7 @@ def loads(s): bb = blackbird.loads(s) return to_program(bb) + def load(f): """Load a quantum program from a Blackbird .xbb file. diff --git a/strawberryfields/ops.py b/strawberryfields/ops.py index 3e21e4e27..1fb90326c 100644 --- a/strawberryfields/ops.py +++ b/strawberryfields/ops.py @@ -30,8 +30,8 @@ import strawberryfields.decompositions as dec from .backends.states import BaseFockState, BaseGaussianState from .backends.shared_ops import changebasis -from .program_utils import (Command, RegRef, MergeFailure) -from .parameters import (par_regref_deps, par_str, par_evaluate, par_is_symbolic, par_funcs as pf) +from .program_utils import Command, RegRef, MergeFailure +from .parameters import par_regref_deps, par_str, par_evaluate, par_is_symbolic, par_funcs as pf # pylint: disable=abstract-method # pylint: disable=protected-access @@ -39,13 +39,15 @@ # numerical tolerances _decomposition_merge_tol = 1e-13 -_decomposition_tol = 1e-13 # TODO this tolerance is used for various purposes and is not well-defined +_decomposition_tol = ( + 1e-13 # TODO this tolerance is used for various purposes and is not well-defined +) def warning_on_one_line(message, category, filename, lineno, file=None, line=None): """User warning formatter""" # pylint: disable=unused-argument - return '{}:{}: {}: {}\n'.format(filename, lineno, category.__name__, message) + return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message) warnings.formatwarning = warning_on_one_line @@ -73,6 +75,7 @@ class Operation: par (Sequence[Any]): Operation parameters. An empty sequence if no parameters are required. 
""" + # default: one-subsystem operation #: int: number of subsystems the operation acts on, or None if any number > 0 is ok ns = 1 @@ -86,7 +89,7 @@ def __init__(self, par): # convert each parameter into a Parameter instance, keep track of dependenciens for q in par: if isinstance(q, RegRef): - raise TypeError('Use RegRef.par for measured parameters.') + raise TypeError("Use RegRef.par for measured parameters.") self.p.append(q) self._measurement_deps |= par_regref_deps(q) @@ -102,7 +105,7 @@ def __str__(self): # class name and parameter values temp = [par_str(i) for i in self.p] - return self.__class__.__name__+'('+', '.join(temp)+')' + return self.__class__.__name__ + "(" + ", ".join(temp) + ")" @property def measurement_deps(self): @@ -179,7 +182,7 @@ def _decompose(self, reg, **kwargs): Returns: list[Command]: decomposition as a list of operations acting on specific subsystems """ - raise NotImplementedError('No decomposition available: {}'.format(self)) + raise NotImplementedError("No decomposition available: {}".format(self)) def _apply(self, reg, backend, **kwargs): """Internal apply method. Uses numeric subsystem referencing. @@ -192,7 +195,7 @@ def _apply(self, reg, backend, **kwargs): Returns: array[Number] or None: Measurement results, if any; shape == (len(reg), shots). """ - raise NotImplementedError('Missing direct implementation: {}'.format(self)) + raise NotImplementedError("Missing direct implementation: {}".format(self)) def apply(self, reg, backend, **kwargs): """Ask a local backend to execute the operation on the current register state right away. @@ -232,10 +235,10 @@ def merge(self, other): # sequential preparation, only the last one matters if isinstance(other, Preparation): # give a warning, since this is pointless and probably a user error - warnings.warn('Two subsequent state preparations, first one has no effect.') + warnings.warn("Two subsequent state preparations, first one has no effect.") return other - raise MergeFailure('For now, Preparations cannot be merged with anything else.') + raise MergeFailure("For now, Preparations cannot be merged with anything else.") class Measurement(Operation): @@ -256,6 +259,7 @@ class Measurement(Operation): Allows the post-selection of specific measurement results instead of randomly sampling. None means no postselection. """ + # todo: self.select could support :class:`~strawberryfields.parameters.Parameter` instances. ns = None @@ -268,11 +272,11 @@ def __str__(self): # class name, parameter values, and possibly the select parameter temp = super().__str__() if self.select is not None: - temp = temp[:-1] + ', select={})'.format(self.select) + temp = temp[:-1] + ", select={})".format(self.select) return temp def merge(self, other): - raise MergeFailure('For now, measurements cannot be merged with anything else.') + raise MergeFailure("For now, measurements cannot be merged with anything else.") def apply(self, reg, backend, **kwargs): """Ask a backend to execute the operation on the current register state right away. @@ -307,13 +311,16 @@ class Decomposition(Operation): .. note:: The first parameter ``p[0]`` of a Decomposition is always a square matrix, and it cannot be symbolic. 
""" + ns = None # overridden by child classes in __init__ @staticmethod def _check_p0(p0): """Checks that p0 is not symbolic.""" if par_is_symbolic(p0): - raise TypeError("The first parameter of a Decomposition is a square matrix, and cannot be symbolic.") + raise TypeError( + "The first parameter of a Decomposition is a square matrix, and cannot be symbolic." + ) def __init__(self, par, decomp=True): self._check_p0(par[0]) @@ -340,7 +347,7 @@ def merge(self, other): return self.__class__(U) - raise MergeFailure('Not the same decomposition type.') + raise MergeFailure("Not the same decomposition type.") class Transformation(Operation): @@ -349,6 +356,7 @@ class Transformation(Operation): This class provides the base behaviour for operations which act on existing states. """ + # NOTE: At the moment this is an empty class, and only # exists for a nicer inheritence diagram. One option is # to remove, and make Channel and Gate top-level derived classes. @@ -367,12 +375,13 @@ class Channel(Transformation): This class provides the base behaviour for non-unitary maps and transformations. """ + # TODO decide how all Channels should treat the first parameter p[0] # (see e.g. https://en.wikipedia.org/wiki/C0-semigroup), cf. p[0] in ops.Gate def merge(self, other): if not self.__class__ == other.__class__: - raise MergeFailure('Not the same channel family.') + raise MergeFailure("Not the same channel family.") # channels can be merged if they are the same class and share all the other parameters if self.p[1:] == other.p[1:]: @@ -471,7 +480,7 @@ def apply(self, reg, backend, **kwargs): def merge(self, other): if not self.__class__ == other.__class__: - raise MergeFailure('Not the same gate family.') + raise MergeFailure("Not the same gate family.") # gates can be merged if they are the same class and share all the other parameters if self.p[1:] == other.p[1:]: @@ -494,7 +503,6 @@ def merge(self, other): raise MergeFailure("Don't know how to merge these gates.") - # ==================================================================== # State preparation operations # ==================================================================== @@ -515,7 +523,7 @@ def _apply(self, reg, backend, **kwargs): def __str__(self): # return the shorthand object when the # command is printed by the user - return 'Vac' + return "Vac" class Coherent(Preparation): @@ -529,7 +537,7 @@ class Coherent(Preparation): p (float): phase angle :math:`\phi` """ - def __init__(self, a=0., p=0.): + def __init__(self, a=0.0, p=0.0): super().__init__([a, p]) def _apply(self, reg, backend, **kwargs): @@ -546,7 +554,7 @@ class Squeezed(Preparation): p (float): squeezing angle :math:`\phi` """ - def __init__(self, r=0., p=0.): + def __init__(self, r=0.0, p=0.0): super().__init__([r, p]) def _apply(self, reg, backend, **kwargs): @@ -571,7 +579,7 @@ class DisplacedSqueezed(Preparation): p (float): squeezing angle :math:`\phi` """ - def __init__(self, alpha=0., r=0., p=0.): + def __init__(self, alpha=0.0, r=0.0, p=0.0): super().__init__([alpha, r, p]) def _apply(self, reg, backend, **kwargs): @@ -581,10 +589,7 @@ def _apply(self, reg, backend, **kwargs): def _decompose(self, reg, **kwargs): # squeezed state preparation followed by a displacement gate - return [ - Command(Squeezed(self.p[1], self.p[2]), reg), - Command(Dgate(self.p[0]), reg) - ] + return [Command(Squeezed(self.p[1], self.p[2]), reg), Command(Dgate(self.p[0]), reg)] class Fock(Preparation): @@ -631,14 +636,14 @@ def _apply(self, reg, backend, **kwargs): l = np.arange(D)[:, 
np.newaxis] # normalization constant - temp = pf.exp(-0.5 * pf.Abs(alpha)**2) - N = temp / pf.sqrt(2*(1 + pf.cos(phi) * temp**4)) + temp = pf.exp(-0.5 * pf.Abs(alpha) ** 2) + N = temp / pf.sqrt(2 * (1 + pf.cos(phi) * temp ** 4)) # coherent states c1 = (alpha ** l) / np.sqrt(ssp.factorial(l)) c2 = ((-alpha) ** l) / np.sqrt(ssp.factorial(l)) # add them up with a relative phase - ket = (c1 + pf.exp(1j*phi) * c2) * N + ket = (c1 + pf.exp(1j * phi) * c2) * N # in order to support broadcasting, the batch axis has been located at last axis, but backend expects it up as first axis ket = np.transpose(ket) @@ -749,6 +754,7 @@ class MeasureFock(Measurement): After measurement, the modes are reset to the vacuum state. """ + ns = None def __init__(self, select=None): @@ -766,6 +772,7 @@ class MeasureThreshold(Measurement): After measurement, the modes are reset to the vacuum state. """ + ns = None def __init__(self, select=None): @@ -810,9 +817,9 @@ def _apply(self, reg, backend, shots=1, **kwargs): def __str__(self): if self.select is None: if self.p[0] == 0: - return 'MeasureX' + return "MeasureX" if self.p[0] == np.pi / 2: - return 'MeasureP' + return "MeasureP" return super().__str__() @@ -838,8 +845,8 @@ def _apply(self, reg, backend, shots=1, **kwargs): def __str__(self): if self.select is None: - return 'MeasureHD' - return 'MeasureHeterodyne(select={})'.format(self.select) + return "MeasureHD" + return "MeasureHeterodyne(select={})".format(self.select) # ==================================================================== @@ -912,7 +919,7 @@ class Dgate(Gate): phi (float): extra (optional) phase angle :math:`\phi` """ - def __init__(self, a, phi=0.): + def __init__(self, a, phi=0.0): super().__init__([a, phi]) def _apply(self, reg, backend, **kwargs): @@ -934,14 +941,10 @@ class Xgate(Gate): def __init__(self, x): super().__init__([x]) - def _decompose(self, reg, **kwargs): # into a displacement z = self.p[0] / np.sqrt(2 * sf.hbar) - return [ - Command(Dgate(z, 0), reg) - ] - + return [Command(Dgate(z, 0), reg)] class Zgate(Gate): @@ -959,10 +962,8 @@ def __init__(self, p): def _decompose(self, reg, **kwargs): # into a displacement - z = self.p[0] * 1j/np.sqrt(2 * sf.hbar) - return [ - Command(Dgate(z, 0), reg) - ] + z = self.p[0] * 1j / np.sqrt(2 * sf.hbar) + return [Command(Dgate(z, 0), reg)] class Sgate(Gate): @@ -978,7 +979,7 @@ class Sgate(Gate): phi (float): squeezing phase angle :math:`\phi` """ - def __init__(self, r, phi=0.): + def __init__(self, r, phi=0.0): super().__init__([r, phi]) def _apply(self, reg, backend, **kwargs): @@ -1003,13 +1004,10 @@ def __init__(self, s): def _decompose(self, reg, **kwargs): # into a squeeze and a rotation temp = self.p[0] / 2 - r = pf.acosh(pf.sqrt(1+temp**2)) + r = pf.acosh(pf.sqrt(1 + temp ** 2)) theta = pf.atan(temp) phi = -np.pi / 2 * pf.sign(temp) - theta - return [ - Command(Sgate(r, phi), reg), - Command(Rgate(theta), reg) - ] + return [Command(Sgate(r, phi), reg), Command(Rgate(theta), reg)] class Vgate(Gate): @@ -1084,7 +1082,7 @@ class BSgate(Gate): """ ns = 2 - def __init__(self, theta=np.pi/4, phi=0.): + def __init__(self, theta=np.pi / 4, phi=0.0): # default: 50% beamsplitter super().__init__([theta, phi]) @@ -1117,9 +1115,9 @@ def _decompose(self, reg, **kwargs): # into local phase shifts and two 50-50 beamsplitters return [ Command(Rgate(self.p[1]), reg[0]), - Command(BSgate(np.pi/4, np.pi/2), reg), + Command(BSgate(np.pi / 4, np.pi / 2), reg), Command(Rgate(self.p[0]), reg[0]), - Command(BSgate(np.pi/4, np.pi/2), reg) + 
Command(BSgate(np.pi / 4, np.pi / 2), reg), ] @@ -1137,7 +1135,7 @@ class S2gate(Gate): """ ns = 2 - def __init__(self, r, phi=0.): + def __init__(self, r, phi=0.0): super().__init__([r, phi]) def _apply(self, reg, backend, **kwargs): @@ -1149,12 +1147,7 @@ def _decompose(self, reg, **kwargs): # two opposite squeezers sandwiched between 50% beamsplitters S = Sgate(self.p[0], self.p[1]) BS = BSgate(np.pi / 4, 0) - return [ - Command(BS, reg), - Command(S, reg[0]), - Command(S.H, reg[1]), - Command(BS.H, reg) - ] + return [Command(BS, reg), Command(S, reg[0]), Command(S.H, reg[1]), Command(BS.H, reg)] class CXgate(Gate): @@ -1176,7 +1169,7 @@ def __init__(self, s=1): def _decompose(self, reg, **kwargs): s = self.p[0] - r = pf.asinh(-s/2) + r = pf.asinh(-s / 2) theta = 0.5 * pf.atan2(-1.0 / pf.cosh(r), -pf.tanh(r)) return [ Command(BSgate(theta, 0), reg), @@ -1209,7 +1202,7 @@ def _decompose(self, reg, **kwargs): return [ Command(Rgate(-np.pi / 2), reg[1]), Command(CX, reg), - Command(Rgate(np.pi / 2), reg[1]) + Command(Rgate(np.pi / 2), reg[1]), ] @@ -1248,16 +1241,14 @@ def __init__(self): def _decompose(self, reg, **kwargs): # into a rotation - theta = np.pi/2 - return [ - Command(Rgate(theta), reg) - ] + theta = np.pi / 2 + return [Command(Rgate(theta), reg)] def __str__(self): """String representation for the gate.""" - temp = 'Fourier' + temp = "Fourier" if self.dagger: - temp += '.H' + temp += ".H" return temp @@ -1288,6 +1279,7 @@ class _Delete(MetaOperation): The deleted modes are traced out. After the deletion the state of the remaining subsystems may have to be described using a density operator. """ + ns = None def __or__(self, reg): @@ -1299,7 +1291,7 @@ def _apply(self, reg, backend, **kwargs): def __str__(self): # use the shorthand object - return 'Del' + return "Del" def New(n=1): @@ -1315,7 +1307,7 @@ def New(n=1): tuple[RegRef]: tuple of the newly added subsystem references """ if pu.Program_current_context is None: - raise RuntimeError('New() can only be called inside a Program context.') + raise RuntimeError("New() can only be called inside a Program context.") # create RegRefs for the new modes refs = pu.Program_current_context._add_subsystems(n) # append the actual Operation to the Program @@ -1329,6 +1321,7 @@ class _New_modes(MetaOperation): This class cannot be used with the :meth:`__or__` syntax since it would be misleading. Indeed, users should *not* use this class directly, but rather the function :func:`New`. """ + ns = 0 def __init__(self, n=1): @@ -1345,7 +1338,7 @@ def _apply(self, reg, backend, **kwargs): def __str__(self): # use the shorthand object - return 'New({})'.format(self.n) + return "New({})".format(self.n) class All(MetaOperation): @@ -1362,7 +1355,7 @@ def __init__(self, op): self.op = op #: Operation: one-subsystem operation to apply def __str__(self): - return super().__str__() + '({})'.format(str(self.op)) + return super().__str__() + "({})".format(str(self.op)) def __or__(self, reg): # into a list of subsystems @@ -1448,10 +1441,17 @@ def __init__(self, U, mesh="rectangular", drop_identity=True, tol=1e-6): self.tol = tol self.drop_identity = drop_identity - allowed_meshes = {"rectangular", "rectangular_phase_end", "rectangular_symmetric", "triangular"} + allowed_meshes = { + "rectangular", + "rectangular_phase_end", + "rectangular_symmetric", + "triangular", + } if mesh not in allowed_meshes: - raise ValueError("Unknown mesh '{}'. Mesh must be one of {}".format(mesh, allowed_meshes)) + raise ValueError( + "Unknown mesh '{}'. 
Mesh must be one of {}".format(mesh, allowed_meshes) + ) self.identity = np.allclose(U, np.identity(len(U)), atol=_decomposition_merge_tol, rtol=0) @@ -1472,7 +1472,12 @@ def _decompose(self, reg, **kwargs): if "symmetric" in mesh: # Mach-Zehnder interferometers - cmds.append(Command(MZgate(np.mod(theta, 2*np.pi), np.mod(phi, 2*np.pi)), (reg[n], reg[m]))) + cmds.append( + Command( + MZgate(np.mod(theta, 2 * np.pi), np.mod(phi, 2 * np.pi)), + (reg[n], reg[m]), + ) + ) else: # Clements style beamsplitters @@ -1531,7 +1536,11 @@ def __init__(self, A, mean_photon_per_mode=1.0, make_traceless=False, tol=1e-6): else: self.identity = False self.sq, self.U = dec.graph_embed( - A, mean_photon_per_mode=mean_photon_per_mode, make_traceless=make_traceless, atol=tol, rtol=0 + A, + mean_photon_per_mode=mean_photon_per_mode, + make_traceless=make_traceless, + atol=tol, + rtol=0, ) def _decompose(self, reg, **kwargs): @@ -1589,21 +1598,24 @@ def __init__(self, A, mean_photon_per_mode=1.0, edges=False, drop_identity=True, self.drop_identity = drop_identity if edges: - self.ns = 2*A.shape[0] + self.ns = 2 * A.shape[0] B = A else: self.ns = A.shape[0] # check if A is a bipartite graph - N = A.shape[0]//2 + N = A.shape[0] // 2 A00 = A[:N, :N] A11 = A[N:, N:] - diag_zeros = np.allclose(A00, np.zeros_like(A00), atol=tol, rtol=0) \ - and np.allclose(A11, np.zeros_like(A11), atol=tol, rtol=0) + diag_zeros = np.allclose(A00, np.zeros_like(A00), atol=tol, rtol=0) and np.allclose( + A11, np.zeros_like(A11), atol=tol, rtol=0 + ) if (not diag_zeros) or (not np.allclose(A, A.T, atol=tol, rtol=0)): - raise ValueError("Adjacency matrix {} does not represent a bipartite graph".format(A)) + raise ValueError( + "Adjacency matrix {} does not represent a bipartite graph".format(A) + ) B = A[:N, N:] @@ -1620,14 +1632,16 @@ def _decompose(self, reg, **kwargs): B = self.p[0] N = len(B) - sq, U, V = dec.bipartite_graph_embed(B, mean_photon_per_mode=mean_photon_per_mode, atol=tol, rtol=0) + sq, U, V = dec.bipartite_graph_embed( + B, mean_photon_per_mode=mean_photon_per_mode, atol=tol, rtol=0 + ) if not self.identity or not drop_identity: for m, s in enumerate(sq): s = s if np.abs(s) >= _decomposition_tol else 0 if not (drop_identity and s == 0): - cmds.append(Command(S2gate(-s), (reg[m], reg[m+N]))) + cmds.append(Command(S2gate(-s), (reg[m], reg[m + N]))) for X, _reg in ((U, reg[:N]), (V, reg[N:])): @@ -1635,7 +1649,11 @@ def _decompose(self, reg, **kwargs): X = np.identity(len(X)) if not (drop_identity and np.all(X == np.identity(len(X)))): - cmds.append(Command(Interferometer(X, mesh=mesh, drop_identity=drop_identity, tol=tol), _reg)) + cmds.append( + Command( + Interferometer(X, mesh=mesh, drop_identity=drop_identity, tol=tol), _reg + ) + ) return cmds @@ -1675,21 +1693,26 @@ class GaussianTransform(Decomposition): tol (float): the tolerance used when checking if the matrix is symplectic: :math:`|S^T\Omega S-\Omega| \leq` tol """ + def __init__(self, S, vacuum=False, tol=1e-10): super().__init__([S]) self.ns = S.shape[0] // 2 - self.vacuum = vacuum #: bool: if True, ignore the first unitary matrix when applying the gate + self.vacuum = ( + vacuum #: bool: if True, ignore the first unitary matrix when applying the gate + ) N = self.ns # shorthand # check if input symplectic is passive (orthogonal) - diffn = np.linalg.norm(S @ S.T - np.identity(2*N)) - self.active = (np.abs(diffn) > _decomposition_tol) #: bool: S is an active symplectic transformation + diffn = np.linalg.norm(S @ S.T - np.identity(2 * N)) + self.active = ( + 
np.abs(diffn) > _decomposition_tol + ) #: bool: S is an active symplectic transformation if not self.active: # The transformation is passive, do Clements X1 = S[:N, :N] P1 = S[N:, :N] - self.U1 = X1+1j*P1 + self.U1 = X1 + 1j * P1 else: # transformation is active, do Bloch-Messiah O1, smat, O2 = dec.bloch_messiah(S, tol=tol) @@ -1698,9 +1721,11 @@ def __init__(self, S, vacuum=False, tol=1e-10): X2 = O2[:N, :N] P2 = O2[N:, :N] - self.U1 = X1+1j*P1 #: array[complex]: unitary matrix corresponding to O_1 - self.U2 = X2+1j*P2 #: array[complex]: unitary matrix corresponding to O_2 - self.Sq = np.diagonal(smat)[:N] #: array[complex]: diagonal vector of the squeezing matrix R + self.U1 = X1 + 1j * P1 #: array[complex]: unitary matrix corresponding to O_1 + self.U2 = X2 + 1j * P2 #: array[complex]: unitary matrix corresponding to O_2 + self.Sq = np.diagonal(smat)[ + :N + ] #: array[complex]: diagonal vector of the squeezing matrix R def _decompose(self, reg, **kwargs): cmds = [] @@ -1760,26 +1785,26 @@ def __init__(self, V, r=None, decomp=True, tol=1e-6): self.ns = V.shape[0] // 2 if r is None: - r = np.zeros(2*self.ns) + r = np.zeros(2 * self.ns) r = np.asarray(r) if len(r) != V.shape[0]: - raise ValueError('Vector of means must have the same length as the covariance matrix.') + raise ValueError("Vector of means must have the same length as the covariance matrix.") super().__init__([V, r], decomp=decomp) # V is hbar-independent, r is not - self.x_disp = r[:self.ns] - self.p_disp = r[self.ns:] + self.x_disp = r[: self.ns] + self.p_disp = r[self.ns :] # needed only if decomposed th, self.S = dec.williamson(V, tol=tol) self.pure = np.abs(np.linalg.det(V) - 1.0) < tol - self.nbar = 0.5 * (np.diag(th)[:self.ns] - 1.0) + self.nbar = 0.5 * (np.diag(th)[: self.ns] - 1.0) def _apply(self, reg, backend, **kwargs): p = par_evaluate(self.p) s = np.sqrt(sf.hbar / 2) # scaling factor, since the backend API call is hbar-independent - backend.prepare_gaussian_state(p[1]/s, p[0], reg) + backend.prepare_gaussian_state(p[1] / s, p[0], reg) def _decompose(self, reg, **kwargs): # pylint: disable=too-many-branches @@ -1790,15 +1815,14 @@ def _decompose(self, reg, **kwargs): is_diag = np.all(V == np.diag(D)) BD = changebasis(self.ns) @ V @ changebasis(self.ns).T - BD_modes = [BD[i*2:(i+1)*2, i*2:(i+1)*2] - for i in range(BD.shape[0]//2)] + BD_modes = [BD[i * 2 : (i + 1) * 2, i * 2 : (i + 1) * 2] for i in range(BD.shape[0] // 2)] is_block_diag = (not is_diag) and np.all(BD == block_diag(*BD_modes)) if self.pure and is_diag: # covariance matrix consists of x/p quadrature squeezed state - for n, expr in enumerate(D[:self.ns]): + for n, expr in enumerate(D[: self.ns]): if np.abs(expr - 1) >= _decomposition_tol: - r = np.abs(np.log(expr)/2) + r = np.abs(np.log(expr) / 2) cmds.append(Command(Squeezed(r, 0), reg[n])) else: cmds.append(Command(Vac, reg[n])) @@ -1813,9 +1837,9 @@ def _decompose(self, reg, **kwargs): else: cmds.append(Command(Vac, reg[n])) - elif not self.pure and is_diag and np.all(D[:self.ns] == D[self.ns:]): + elif not self.pure and is_diag and np.all(D[: self.ns] == D[self.ns :]): # covariance matrix consists of thermal states - for n, nbar in enumerate(0.5 * (D[:self.ns] - 1.0)): + for n, nbar in enumerate(0.5 * (D[: self.ns] - 1.0)): if nbar >= _decomposition_tol: cmds.append(Command(Thermal(nbar), reg[n])) else: @@ -1836,15 +1860,13 @@ def _decompose(self, reg, **kwargs): cmds.append(Command(GaussianTransform(self.S, vacuum=self.pure), reg)) - cmds += [Command(Xgate(u), reg[n]) - for n, u in 
enumerate(self.x_disp) if u != 0] - cmds += [Command(Zgate(u), reg[n]) - for n, u in enumerate(self.p_disp) if u != 0] + cmds += [Command(Xgate(u), reg[n]) for n, u in enumerate(self.x_disp) if u != 0] + cmds += [Command(Zgate(u), reg[n]) for n, u in enumerate(self.p_disp) if u != 0] return cmds -#======================================================================= +# ======================================================================= # Shorthands, e.g. pre-constructed singleton-like objects Del = _Delete() @@ -1855,27 +1877,36 @@ def _decompose(self, reg, **kwargs): Fourier = Fouriergate() -shorthands = ['New', 'Del', 'Vac', 'MeasureX', 'MeasureP', 'MeasureHD', 'Fourier', 'All'] +shorthands = ["New", "Del", "Vac", "MeasureX", "MeasureP", "MeasureHD", "Fourier", "All"] -#======================================================================= +# ======================================================================= # here we list different classes of operations for unit testing purposes zero_args_gates = (Fouriergate,) -one_args_gates = (Xgate, Zgate, Rgate, Pgate, Vgate, - Kgate, CXgate, CZgate, CKgate) +one_args_gates = (Xgate, Zgate, Rgate, Pgate, Vgate, Kgate, CXgate, CZgate, CKgate) two_args_gates = (Dgate, Sgate, BSgate, MZgate, S2gate) gates = zero_args_gates + one_args_gates + two_args_gates channels = (LossChannel, ThermalLossChannel) -simple_state_preparations = (Vacuum, Coherent, Squeezed, DisplacedSqueezed, Fock, Catstate, Thermal) # have __init__ methods with default arguments +simple_state_preparations = ( + Vacuum, + Coherent, + Squeezed, + DisplacedSqueezed, + Fock, + Catstate, + Thermal, +) # have __init__ methods with default arguments state_preparations = simple_state_preparations + (Ket, DensityMatrix) measurements = (MeasureFock, MeasureHomodyne, MeasureHeterodyne, MeasureThreshold) decompositions = (Interferometer, BipartiteGraphEmbed, GraphEmbed, GaussianTransform, Gaussian) -#======================================================================= +# ======================================================================= # exported symbols -__all__ = [cls.__name__ for cls in gates + channels + state_preparations + measurements + decompositions] + shorthands +__all__ = [ + cls.__name__ for cls in gates + channels + state_preparations + measurements + decompositions +] + shorthands diff --git a/strawberryfields/parameters.py b/strawberryfields/parameters.py index 2d6c3bc7f..9ecb68cef 100644 --- a/strawberryfields/parameters.py +++ b/strawberryfields/parameters.py @@ -102,7 +102,6 @@ import sympy.functions as sf - def wrap_mathfunc(func): """Applies the wrapped sympy function elementwise to NumPy arrays. @@ -110,22 +109,30 @@ def wrap_mathfunc(func): We implement no broadcasting; if the first argument is a NumPy array, we assume all the arguments are arrays of the same shape. """ + @functools.wraps(func) def wrapper(*args): temp = [isinstance(k, np.ndarray) for k in args] if any(temp): if not all(temp): - raise ValueError('Parameter functions with array arguments: all the arguments must be arrays of the same shape.') + raise ValueError( + "Parameter functions with array arguments: all the arguments must be arrays of the same shape." + ) for k in args[1:]: if len(k) != len(args[0]): - raise ValueError('Parameter functions with array arguments: all the arguments must be arrays of the same shape.') + raise ValueError( + "Parameter functions with array arguments: all the arguments must be arrays of the same shape." 
+ ) # apply func elementwise, recursively, on the args return np.array([wrapper(*k) for k in zip(*args)]) return func(*args) + return wrapper -par_funcs = types.SimpleNamespace(**{name: wrap_mathfunc(getattr(sf, name)) for name in dir(sf) if name[0] != '_'}) +par_funcs = types.SimpleNamespace( + **{name: wrap_mathfunc(getattr(sf, name)) for name in dir(sf) if name[0] != "_"} +) """SimpleNamespace: Namespace of mathematical functions for manipulating Parameters. Consists of all :mod:`sympy.functions` public members, which we wrap with :func:`wrap_mathfunc`. """ @@ -184,8 +191,8 @@ def do_evaluate(p): vals = [k._eval_evalf(None) for k in atoms] # use the tensorflow printer if any of the symbolic parameter values are TF objects # (we do it like this to avoid importing tensorflow if it's not needed) - is_tf = (type(v).__module__.startswith('tensorflow') for v in vals) - printer = 'tensorflow' if any(is_tf) else 'numpy' + is_tf = (type(v).__module__.startswith("tensorflow") for v in vals) + printer = "tensorflow" if any(is_tf) else "numpy" func = sympy.lambdify(atoms, p, printer) return func(*vals) @@ -220,12 +227,13 @@ def par_convert(args, prog): Returns: list[Any]: converted arguments """ + def do_convert(a): if isinstance(a, sympy.Basic): # substitute SF symbolic parameter objects for Blackbird ones s = {} for k in a.atoms(sympy.Symbol): - if k.name[0] == 'q': + if k.name[0] == "q": s[k] = MeasuredParameter(prog.register[int(k.name[1:])]) else: s[k] = prog.params(k.name) # free parameter @@ -273,7 +281,7 @@ def par_str(p): return str(p) if par_is_symbolic(p): return str(p) - return '{:.4g}'.format(p) # scalar parameters + return "{:.4g}".format(p) # scalar parameters class MeasuredParameter(sympy.Symbol): @@ -296,11 +304,11 @@ class MeasuredParameter(sympy.Symbol): def __new__(cls, regref): # sympy.Basic.__new__ wants a name, other arguments must not end up in self._args - return super().__new__(cls, 'q'+str(regref.ind)) + return super().__new__(cls, "q" + str(regref.ind)) def __init__(self, regref): if not regref.active: - raise ValueError('Trying to use an inactive RegRef.') + raise ValueError("Trying to use an inactive RegRef.") #: RegRef: the value of the parameter depends on this RegRef, and can only be evaluated after the corresponding subsystem has been measured self.regref = regref @@ -309,7 +317,7 @@ def _sympystr(self, printer): The Sympy printing system uses this method instead of __str__. """ - return 'q{}'.format(self.regref.ind) + return "q{}".format(self.regref.ind) def _eval_evalf(self, prec): """Returns the numeric result of the measurement if it is available. @@ -322,7 +330,11 @@ def _eval_evalf(self, prec): """ res = self.regref.val if res is None: - raise ParameterError("{}: trying to use a nonexistent measurement result (e.g., before it has been measured).".format(self)) + raise ParameterError( + "{}: trying to use a nonexistent measurement result (e.g., before it has been measured).".format( + self + ) + ) return res @@ -332,6 +344,7 @@ class FreeParameter(sympy.Symbol): Args: name (str): name of the free parameter """ + def __init__(self, name): #: str: name of the free parameter self.name = name @@ -345,7 +358,7 @@ def _sympystr(self, printer): The Sympy printing system uses this method instead of __str__. """ - return '{{{}}}'.format(self.name) + return "{{{}}}".format(self.name) def _eval_evalf(self, prec): """Returns the value of the parameter if it has been bound, or the default value if not. 
diff --git a/strawberryfields/program.py b/strawberryfields/program.py index 4d48a2712..5d93f43c5 100644 --- a/strawberryfields/program.py +++ b/strawberryfields/program.py @@ -126,6 +126,7 @@ class Program: Alternatively, another Program instance from which to inherit the register state. name (str): program name (optional) """ + def __init__(self, num_subsystems, name=None): #: str: program name self.name = name @@ -165,7 +166,9 @@ def __init__(self, num_subsystems, name=None): self.reg_refs = copy.deepcopy(parent.reg_refs) # independent copy of the RegRefs self.unused_indices = copy.copy(parent.unused_indices) else: - raise TypeError('First argument must be either the number of subsystems or the parent Program.') + raise TypeError( + "First argument must be either the number of subsystems or the parent Program." + ) # save the initial regref state #: dict[int, RegRef]: like reg_refs @@ -236,7 +239,7 @@ def __enter__(self): if pu.Program_current_context is None: pu.Program_current_context = self else: - raise RuntimeError('Only one Program context can be active at a time.') + raise RuntimeError("Only one Program context can be active at a time.") return self.register def __exit__(self, ex_type, ex_value, ex_tb): @@ -287,13 +290,13 @@ def _add_subsystems(self, n): tuple[RegRef]: tuple of the newly added subsystem references """ if self.locked: - raise CircuitError('The Program is locked, no new subsystems can be created.') + raise CircuitError("The Program is locked, no new subsystems can be created.") if not isinstance(n, numbers.Integral) or n < 1: - raise ValueError('Number of added subsystems {} is not a positive integer.'.format(n)) + raise ValueError("Number of added subsystems {} is not a positive integer.".format(n)) first_unassigned_index = len(self.reg_refs) # create a list of RegRefs - inds = [first_unassigned_index+i for i in range(n)] + inds = [first_unassigned_index + i for i in range(n)] refs = tuple(RegRef(i) for i in inds) # add them to the index map for r in refs: @@ -355,7 +358,7 @@ def _index_to_regref(self, ind): """ # index must be found in the dict if ind not in self.reg_refs: - raise RegRefError('Subsystem {} does not exist.'.format(ind)) + raise RegRefError("Subsystem {} does not exist.".format(ind)) return self.reg_refs[ind] def _test_regrefs(self, reg): @@ -378,18 +381,18 @@ def _test_regrefs(self, reg): if isinstance(rr, RegRef): # regref must be found in the dict values (the RegRefs are compared using __eq__, which, since we do not define it, defaults to "is") if rr not in self.reg_refs.values(): - raise RegRefError('Unknown RegRef.') + raise RegRefError("Unknown RegRef.") if self.reg_refs[rr.ind] is not rr: - raise RegRefError('RegRef state has become inconsistent.') + raise RegRefError("RegRef state has become inconsistent.") elif isinstance(rr, numbers.Integral): rr = self._index_to_regref(rr) else: - raise RegRefError('Subsystems can only be indexed using integers and RegRefs.') + raise RegRefError("Subsystems can only be indexed using integers and RegRefs.") if not rr.active: - raise RegRefError('Subsystem {} has already been deleted.'.format(rr.ind)) + raise RegRefError("Subsystem {} has already been deleted.".format(rr.ind)) if rr in temp: - raise RegRefError('Trying to act on the same subsystem more than once.') + raise RegRefError("Trying to act on the same subsystem more than once.") temp.append(rr) return temp @@ -403,7 +406,7 @@ def append(self, op, reg): list[RegRef]: subsystem list as RegRefs """ if self.locked: - raise CircuitError('The Program 
is locked, no more Commands can be appended to it.') + raise CircuitError("The Program is locked, no more Commands can be appended to it.") # test that the target subsystem references are ok reg = self._test_regrefs(reg) @@ -470,7 +473,11 @@ def compile(self, target, **kwargs): elif target in specs.circuit_db: db = specs.circuit_db[target]() else: - raise ValueError("Could not find target '{}' in the Strawberry Fields circuit database.".format(target)) + raise ValueError( + "Could not find target '{}' in the Strawberry Fields circuit database.".format( + target + ) + ) if db.modes is not None: # subsystems may be created and destroyed, this is total number that has ever existed @@ -483,14 +490,14 @@ def compile(self, target, **kwargs): seq = db.decompose(self.circuit) - if kwargs.get('warn_connected', True): + if kwargs.get("warn_connected", True): DAG = pu.list_to_DAG(seq) temp = nx.algorithms.components.number_weakly_connected_components(DAG) if temp > 1: - warnings.warn('The circuit consists of {} disconnected components.'.format(temp)) + warnings.warn("The circuit consists of {} disconnected components.".format(temp)) # run optimizations - if kwargs.get('optimize', False): + if kwargs.get("optimize", False): seq = pu.optimize_circuit(seq) # does the circuit spec have its own compilation method? @@ -530,7 +537,7 @@ def optimize(self): opt.circuit = pu.optimize_circuit(self.circuit) return opt - def draw_circuit(self, tex_dir='./circuit_tex', write_to_file=True): + def draw_circuit(self, tex_dir="./circuit_tex", write_to_file=True): r"""Draw the circuit using the Qcircuit :math:`\LaTeX` package. This will generate the LaTeX code required to draw the quantum circuit @@ -597,11 +604,13 @@ def params(self, *args): ret = [] for a in args: if not isinstance(a, str): - raise TypeError('Parameter names must be strings.') + raise TypeError("Parameter names must be strings.") if a not in self.free_params: if self.locked: - raise CircuitError('The Program is locked, no more free parameters can be created.') + raise CircuitError( + "The Program is locked, no more free parameters can be created." + ) p = FreeParameter(a) self.free_params[a] = p else: diff --git a/strawberryfields/program_utils.py b/strawberryfields/program_utils.py index 393826762..53166f827 100644 --- a/strawberryfields/program_utils.py +++ b/strawberryfields/program_utils.py @@ -23,9 +23,20 @@ from .parameters import MeasuredParameter -__all__ = ['Program_current_context', 'RegRefError', 'CircuitError', 'MergeFailure', - 'Command', 'RegRef', - 'list_to_grid', 'grid_to_DAG', 'DAG_to_list', 'list_to_DAG', 'group_operations', 'optimize_circuit'] +__all__ = [ + "Program_current_context", + "RegRefError", + "CircuitError", + "MergeFailure", + "Command", + "RegRef", + "list_to_grid", + "grid_to_DAG", + "DAG_to_list", + "list_to_DAG", + "group_operations", + "optimize_circuit", +] Program_current_context = None @@ -69,6 +80,7 @@ class Command: reg (Sequence[RegRef]): Subsystems to which the operation is applied. Note that the order matters here. """ + # pylint: disable=too-few-public-methods def __init__(self, op, reg): @@ -136,15 +148,16 @@ class RegRef: Args: ind (int): index of the register subsystem referred to """ + # pylint: disable=too-few-public-methods def __init__(self, ind): - self.ind = ind #: int: subsystem index + self.ind = ind #: int: subsystem index self.val = None #: float, complex: Measurement result. None if the subsystem has not been measured yet. 
self.active = True #: bool: True at construction, False after the subsystem is deleted def __str__(self): - return 'q[{}]'.format(self.ind) + return "q[{}]".format(self.ind) def __hash__(self): """Hashing method. @@ -160,7 +173,7 @@ def __eq__(self, other): NOTE: Affects the hashability of RegRefs, see also :meth:`__hash__`. """ if other.__class__ != self.__class__: - print('--------------- regref.__eq__: compared reqref to ', other.__class__) + print("--------------- regref.__eq__: compared reqref to ", other.__class__) return False return self.ind == other.ind and self.active == other.active @@ -174,12 +187,11 @@ def par(self): return MeasuredParameter(self) - - # ================= # Utility functions # ================= + def list_to_grid(ls): """Transforms a list of Commands to a grid representation. @@ -219,7 +231,7 @@ def grid_to_DAG(grid): DAG.add_node(q[0]) for i in range(1, len(q)): # add the edge between the operations, and the operation nodes themselves - DAG.add_edge(q[i-1], q[i]) + DAG.add_edge(q[i - 1], q[i]) return DAG @@ -335,13 +347,14 @@ def optimize_circuit(seq): Returns: List[Command]: optimized circuit """ + def _print_list(i, q, print_fn=print): "For debugging." # pylint: disable=unreachable return - print_fn('i: {}, len: {} '.format(i, len(q)), end='') + print_fn("i: {}, len: {} ".format(i, len(q)), end="") for x in q: - print_fn(x.op, ', ', end='') + print_fn(x.op, ", ", end="") print_fn() grid = list_to_grid(seq) @@ -353,11 +366,11 @@ def _print_list(i, q, print_fn=print): q = grid[k] i = 0 # index along the wire _print_list(i, q) - while i+1 < len(q): + while i + 1 < len(q): # at least two operations left to merge on this wire try: a = q[i] - b = q[i+1] + b = q[i + 1] # the ops must have equal size and act on the same wires if a.op.ns == b.op.ns and a.reg == b.reg: if a.op.ns != 1: @@ -368,7 +381,7 @@ def _print_list(i, q, print_fn=print): continue op = a.op.merge(b.op) # merge was successful, delete the old ops - del q[i:i+2] + del q[i : i + 2] # insert the merged op (unless it's identity) if op is not None: q.insert(i, Command(op, a.reg)) diff --git a/strawberryfields/utils.py b/strawberryfields/utils.py index 0ffee7d5a..5b0a7ad2e 100644 --- a/strawberryfields/utils.py +++ b/strawberryfields/utils.py @@ -61,7 +61,6 @@ from .ops import Gate, Channel, Ket - # ------------------------------------------------------------------------ # State functions - Fock basis and Gaussian basis | # ------------------------------------------------------------------------