
Commit 67045dc

Author: Corey Ostrove
Message: Merge remote-tracking branch 'origin/bugfix'
Parents: c50127c + 117fb15

File tree: 21 files changed (+963, −634 lines)


jupyter_notebooks/Examples/Leakage-automagic.ipynb
Lines changed: 48 additions & 9 deletions

@@ -6,10 +6,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from pygsti.modelpacks import smq1Q_XY, smq1Q_ZN\n",
+    "from pygsti.modelpacks import smq1Q_XYI as mp\n",
     "from pygsti.tools.leakage import leaky_qubit_model_from_pspec, construct_leakage_report\n",
     "from pygsti.data import simulate_data\n",
-    "from pygsti.protocols import StandardGST, ProtocolData"
+    "from pygsti.protocols import StandardGST, ProtocolData\n",
+    "import numpy as np\n",
+    "import scipy.linalg as la"
    ]
   },
   {

@@ -27,14 +29,51 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "mp = smq1Q_XY\n",
-    "ed = mp.create_gst_experiment_design(max_max_length=32)\n",
+    "def with_leaky_gate(m, gate_label, strength):\n",
+    "    rng = np.random.default_rng(0)\n",
+    "    v = np.concatenate([[0.0], rng.standard_normal(size=(2,))])\n",
+    "    v /= la.norm(v)\n",
+    "    H = v.reshape((-1, 1)) @ v.reshape((1, -1))\n",
+    "    H *= strength\n",
+    "    U = la.expm(1j*H)\n",
+    "    m_copy = m.copy()\n",
+    "    G_ideal = m_copy.operations[gate_label]\n",
+    "    from pygsti.modelmembers.operations import ComposedOp, StaticUnitaryOp\n",
+    "    m_copy.operations[gate_label] = ComposedOp([G_ideal, StaticUnitaryOp(U, basis=m.basis)])\n",
+    "    return m_copy, v\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ed = mp.create_gst_experiment_design(max_max_length=8)\n",
+    "# ^ The max length is kept small so we don't have to wait as long\n",
+    "# for the GST fit (just for purposes of this notebook).\n",
     "tm3 = leaky_qubit_model_from_pspec(mp.processor_spec(), mx_basis='l2p1')\n",
-    "# ^ We could use basis = 'gm' instead of 'l2p1'. We prefer 'l2p1'\n",
-    "# because it makes process matrices easier to interpret in leakage\n",
-    "# modeling.\n",
-    "ds = simulate_data(tm3, ed.all_circuits_needing_data, num_samples=1000, seed=1997)\n",
-    "gst = StandardGST( modes=('CPTPLND',), target_model=tm3, verbosity=2)\n",
+    "# ^ Target model. \"Leaky\" is a bit of a misnomer here. The returned model\n",
+    "# is simply a qutrit lift of the qubit model; leakage errors in the\n",
+    "# qubit model can manifest as CPTP Markovian errors in the qutrit model.\n",
+    "dgm3, leaking_state = with_leaky_gate(tm3, ('Gxpi2', 0), strength=0.125)\n",
+    "# ^ Data-generating model.\n",
+    "num_samples = 100_000\n",
+    "# ^ The number of samples is large to compensate for short circuit length.\n",
+    "# Feel free to change the number of samples to something more \"realistic\"\n",
+    "# if you'd like.\n",
+    "if num_samples > 10_000:\n",
+    "    from pygsti.objectivefns import objectivefns\n",
+    "    objectivefns.DEFAULT_MIN_PROB_CLIP = objectivefns.DEFAULT_RADIUS = 1e-12\n",
+    "    # ^ There are numerical thresholding rules in objective function evaluation\n",
+    "    # that lead to errors when the number of samples is extremely large.\n",
+    "    # The lines above change those thresholding rules to be appropriate in\n",
+    "    # the unusual setting that is this notebook.\n",
+    "ds = simulate_data(dgm3, ed.all_circuits_needing_data, num_samples=num_samples, seed=1997)\n",
+    "gst = StandardGST(\n",
+    "    modes=('CPTPLND',), target_model=tm3, verbosity=2,\n",
+    "    badfit_options={'actions': ['wildcard1d'], 'threshold': 0.0}\n",
+    ")\n",
     "pd = ProtocolData(ed, ds)\n",
     "res = gst.run(pd)"
    ]

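For readers skimming the notebook diff, the heart of `with_leaky_gate` is a small coherent error acting on the qutrit's |1>/|2> levels. A minimal standalone sketch of that construction, assuming only NumPy and SciPy (the pyGSTi-specific `ComposedOp`/`StaticUnitaryOp` wrapping is omitted):

    import numpy as np
    import scipy.linalg as la

    # Draw a random unit vector supported on levels |1> and |2>; the leading
    # zero keeps |0> out of the leaking direction.
    rng = np.random.default_rng(0)
    v = np.concatenate([[0.0], rng.standard_normal(size=(2,))])
    v /= la.norm(v)

    # Rank-1 Hermitian generator, scaled by the error strength, then
    # exponentiated into a unitary close to the identity.
    H = 0.125 * np.outer(v, v)
    U = la.expm(1j * H)

    assert np.allclose(U @ U.conj().T, np.eye(3))  # U is a valid qutrit unitary

Composing this U after the ideal gate yields the data-generating model whose leakage the notebook then asks GST to detect.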
pygsti/algorithms/gaugeopt.py
Lines changed: 25 additions & 20 deletions

@@ -24,6 +24,8 @@
     TrivialGaugeGroupElement as _TrivialGaugeGroupElement,
     GaugeGroupElement as _GaugeGroupElement
 )
+from pygsti.models import ExplicitOpModel
+from pygsti.modelmembers.operations import LinearOperator as _LinearOperator

 from typing import Callable, Union, Optional

@@ -360,8 +362,10 @@ def _call_jacobian_fn(gauge_group_el_vec):

 GGElJacobian = Union[None, Callable[[_GaugeGroupElement], _np.ndarray]]

+LabelLike = Union[str, _baseobjs.Label]

-def _create_objective_fn(model, target_model, item_weights: Optional[dict[str,float]]=None,
+
+def _create_objective_fn(model, target_model, item_weights: Optional[dict[LabelLike, float]]=None,
                          cptp_penalty_factor: float=0.0, spam_penalty_factor: float=0.0,
                          gates_metric="frobenius", spam_metric="frobenius",
                          method=None, comm=None, check_jac=False, n_leak=0) -> tuple[GGElObjective, GGElJacobian]:

@@ -625,8 +629,14 @@ def _mock_objective_fn(v):
     # ^ It would be equivalent to set this to a pair of IdentityOperator objects.

     def _objective_fn(gauge_group_el, oob_check):
-        mdl = _transform_with_oob_check(model, gauge_group_el, oob_check)
-        ret = 0
+        mdl: ExplicitOpModel = _transform_with_oob_check(model, gauge_group_el, oob_check)
+        mdl_ops: dict[_baseobjs.Label, _LinearOperator] = mdl._excalc().operations
+        tgt_ops: dict[_baseobjs.Label, _LinearOperator] = dict()
+        if target_model is not None:
+            tgt_ops.update(target_model._excalc().operations)
+        # ^ Use these dicts instead of mdl.operations and target_model.operations,
+        #   since these dicts are updated to include instruments.
+        ret = 0.0

         if cptp_penalty_factor > 0:
             mdl.basis = mxBasis  # set basis for jamiolkowski iso

@@ -645,30 +655,27 @@ def _objective_fn(gauge_group_el, oob_check):
             if spam_metric == gates_metric:
                 val = mdl.frobeniusdist(target_model, transform_mx_arg, item_weights)
             else:
-                wts = item_weights.copy()
-                wts['spam'] = 0.0
-                for k in wts:
-                    if k in mdl.preps or k in mdl.povms:
-                        wts[k] = 0.0
-                val = mdl.frobeniusdist(target_model, transform_mx_arg, wts, n_leak)
+                non_gates = {'spam'}.union(set(mdl.preps)).union(set(mdl.povms))
+                temp_wts = {k: (0.0 if k in non_gates else v) for (k, v) in item_weights.items()}
+                val = mdl.frobeniusdist(target_model, transform_mx_arg, temp_wts)
             if "squared" in gates_metric:
                 val = val ** 2
             ret += val

         elif gates_metric == "fidelity":
             # If n_leak==0, then subspace_entanglement_fidelity is just entanglement_fidelity
-            for opLbl in mdl.operations:
+            for opLbl in mdl_ops:
                 wt = item_weights.get(opLbl, opWeight)
-                top = target_model.operations[opLbl].to_dense()
-                mop = mdl.operations[opLbl].to_dense()
+                top = tgt_ops[opLbl].to_dense()
+                mop = mdl_ops[opLbl].to_dense()
                 ret += wt * (1.0 - _tools.subspace_entanglement_fidelity(top, mop, mxBasis, n_leak))**2

         elif gates_metric == "tracedist":
             # If n_leak==0, then subspace_jtracedist is just jtracedist.
-            for opLbl in mdl.operations:
+            for opLbl in mdl_ops:
                 wt = item_weights.get(opLbl, opWeight)
-                top = target_model.operations[opLbl].to_dense()
-                mop = mdl.operations[opLbl].to_dense()
+                top = tgt_ops[opLbl].to_dense()
+                mop = mdl_ops[opLbl].to_dense()
                 ret += wt * _tools.subspace_jtracedist(top, mop, mxBasis, n_leak)

         else:

@@ -680,11 +687,9 @@ def _objective_fn(gauge_group_el, oob_check):

         if "frobenius" in spam_metric:
             # SPAM and gates can have different choices for squared vs non-squared.
-            wts = item_weights.copy(); wts['gates'] = 0.0
-            for k in wts:
-                if k in mdl.operations or k in mdl.instruments:
-                    wts[k] = 0.0
-            val = mdl.frobeniusdist(target_model, transform_mx_arg, wts)
+            non_spam = {'gates'}.union(set(mdl_ops))  # use mdl_ops, not mdl.operations!
+            temp_wts = {k: (0.0 if k in non_spam else v) for (k, v) in item_weights.items()}
+            val = mdl.frobeniusdist(target_model, transform_mx_arg, temp_wts)
             if "squared" in spam_metric:
                 val = val ** 2
             ret += val

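The weight-handling refactor in `_objective_fn` trades copy-then-mutate loops for a single dict comprehension over a frozen key set. A minimal sketch of the pattern with hypothetical label names (the real keys are pyGSTi `Label` objects for preps, POVMs, and gates):

    # Per-item weights, plus the special aggregate keys 'gates' and 'spam'.
    item_weights = {'gates': 1.0, 'spam': 1.0, 'rho0': 2.0, 'Mdefault': 2.0, 'Gxpi2': 0.5}

    # Silence everything SPAM-related so only gate terms enter the metric.
    non_gates = {'spam', 'rho0', 'Mdefault'}
    temp_wts = {k: (0.0 if k in non_gates else v) for (k, v) in item_weights.items()}
    # temp_wts == {'gates': 1.0, 'spam': 0.0, 'rho0': 0.0, 'Mdefault': 0.0, 'Gxpi2': 0.5}

Unlike the old code, this leaves `item_weights` untouched and makes the set of silenced keys explicit in one place.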
pygsti/data/dataset.py
Lines changed: 3 additions & 3 deletions

@@ -575,18 +575,18 @@ def _get_counts(self, timestamp=None, all_outcomes=False):
                     cntDict.setitem_unsafe(ol, cnt)
             else:
                 for ol, i in self.dataset.olIndex.items():
-                    inds = oli_tslc[oli_tslc == i]
+                    inds = _np.where(oli_tslc == i)[0]
                     if len(inds) > 0 or all_outcomes:
                         cntDict.setitem_unsafe(ol, float(sum(self.reps[tslc][inds])))
         else:
             if self.reps is None:
                 for ol_index in oli_tslc:
                     ol = self.dataset.ol[ol_index]
-                    cntDict.setitem_unsafe(ol, 1.0 + cntDict.getitem_unsafe(ol, 0.0))
+                    cntDict.setitem_unsafe(ol, float(1.0 + cntDict.getitem_unsafe(ol, 0.0)))
             else:
                 for ol_index, reps in zip(oli_tslc, self.reps[tslc]):
                     ol = self.dataset.ol[ol_index]
-                    cntDict.setitem_unsafe(ol, reps + cntDict.getitem_unsafe(ol, 0.0))
+                    cntDict.setitem_unsafe(ol, float(reps + cntDict.getitem_unsafe(ol, 0.0)))

         return cntDict

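The small-looking change in `_get_counts` fixes a real indexing bug: `oli_tslc[oli_tslc == i]` boolean-masks the array and returns the matching *values*, while the surrounding code needs the matching *positions* to index `self.reps[tslc]`. A toy demonstration with made-up numbers:

    import numpy as np

    oli = np.array([0, 2, 1, 2, 0])    # outcome-label index for each record
    reps = np.array([10, 5, 7, 3, 2])  # repetition counts, aligned with oli

    i = 2
    wrong = oli[oli == i]            # array([2, 2]): the values, not positions
    right = np.where(oli == i)[0]    # array([1, 3]): the positions we need

    print(reps[wrong].sum())   # 14 -- sums reps[2] twice, which is wrong
    print(reps[right].sum())   # 8  -- sums reps[1] + reps[3], as intended

The accompanying `float(...)` wrappers just keep the accumulated counts a consistent floating-point type.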
pygsti/modelmembers/modelmember.py
Lines changed: 98 additions & 6 deletions

@@ -10,14 +10,20 @@
 # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
 #***************************************************************************************************

+from __future__ import annotations
+
 from collections import OrderedDict
 import copy as _copy

 import numpy as _np

 from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
+from pygsti.pgtypes import SpaceT
 from pygsti.tools import listtools as _lt
 from pygsti.tools import slicetools as _slct
+from pygsti.tools import matrixtools as _mt
+
+from typing import Optional


 class ModelChild(object):

@@ -254,6 +260,10 @@ def parent(self):
         -------
         Model
         """
+        if '_parent' not in self.__dict__:
+            # This can be absent because of how serialization works.
+            # It's set during deserialization in self.relink_parent().
+            self._parent = None
         return self._parent

     @parent.setter

@@ -308,11 +318,13 @@ def relink_parent(self, parent):
         None
         """
         for subm in self.submembers():
-            subm.relink_parent(parent)
-
-        if self._parent is parent: return  # OK to relink multiple times
-        assert(self._parent is None), "Cannot relink parent: parent is not None!"
-        self._parent = parent  # assume no dependent objects
+            subm.relink_parent(parent)
+        if '_parent' in self.__dict__:
+            # This codepath needed to resolve GitHub issue 651.
+            if self._parent is parent:
+                return  # OK to relink multiple times
+            assert(self._parent is None), "Cannot relink parent: current parent is not None!"
+        self._parent = parent

     def unlink_parent(self, force=False):
         """

@@ -793,6 +805,77 @@ def copy(self, parent=None, memo=None):
             memo[id(self.parent)] = None  # so deepcopy uses None instead of copying parent
         return self._copy_gpindices(_copy.deepcopy(self, memo), parent, memo)

+    def to_dense(self) -> _np.ndarray:
+        raise NotImplementedError('Derived classes must implement .to_dense().')
+
+    def _to_transformed_dense(self, T_domain: _mt.OperatorLike, T_codomain: _mt.OperatorLike, on_space: SpaceT='minimal') -> _np.ndarray:
+        """
+        Return an array, XT, obtained by suitably transforming X = self.to_dense(on_space).
+
+        The basic nature of the transformation X --> XT depends on the category of `self`,
+        as determined by its domain and codomain.
+
+        | abstract category | domain       | codomain     |
+        | ----------------- | ------------ | ------------ |
+        | vector            | field        | vector space |
+        | functional        | vector space | field        |
+        | operator          | vector space | vector space |
+
+        To state the specific transformation X --> XT, let op(X) denote the operator
+        representation of X obtained by (1) interpreting fields as 1-dimensional vector
+        spaces, and (2) having linear operators act on vectors by left-multiplication.
+
+        The returned array, XT, is defined through its op(XT) representation:
+
+        | abstract category | op(XT) representation of XT   |
+        | ----------------- | ----------------------------- |
+        | vector            | T_codomain @ op(X)            |
+        | functional        | op(X) @ T_domain              |
+        | operator          | T_codomain @ op(X) @ T_domain |
+
+        Note that T_domain is ignored for abstract vectors (i.e., state prep), and T_codomain
+        is ignored for abstract functionals (i.e., POVM effects).
+        """
+        raise NotImplementedError()
+
+    def residuals(self, other: ModelMember,
+                  transform: Optional[_mt.OperatorLike]=None, inv_transform: Optional[_mt.OperatorLike]=None
+                  ) -> _np.ndarray:
+        # This implementation was introduced as part of a heavy refactor, but it preserves all intended
+        # semantics of the old implementation.
+        T_domain = _mt.to_operatorlike(transform)
+        T_codomain = _mt.to_operatorlike(inv_transform)
+        # ^ to_operatorlike casts None to IdentityOperator
+        X = self._to_transformed_dense(T_domain, T_codomain)
+        if isinstance(inv_transform, _mt.IdentityOperator):
+            # Passing inv_transform as an IdentityOperator (rather than casting from None)
+            # is a flag. It indicates that we want to apply `transform` to `other` as well.
+            #
+            # (Yes, this sort of flag interpretation is bad design. No, I don't want to
+            # spend the time on a good design.)
+            Y = other._to_transformed_dense(T_domain, inv_transform)
+        else:
+            Y = other.to_dense()
+        return (X - Y).ravel()
+
+    def frobeniusdist_squared(self, other: ModelMember,
+                              transform: Optional[_mt.OperatorLike]=None, inv_transform: Optional[_mt.OperatorLike]=None
+                              ) -> _np.floating:
+        """
+        Return the squared Frobenius norm of the difference between `self` and `other`,
+        possibly after transformation by `transform` and/or `inv_transform`.
+        """
+        return _np.linalg.norm(self.residuals(other, transform, inv_transform))**2
+
+    def frobeniusdist(self, other: ModelMember,
+                      transform: Optional[_mt.OperatorLike]=None, inv_transform: Optional[_mt.OperatorLike]=None
+                      ) -> _np.floating:
+        """
+        Return the Frobenius norm of the difference between `self` and `other`,
+        possibly after transformation by `transform` and/or `inv_transform`.
+        """
+        return _np.linalg.norm(self.residuals(other, transform, inv_transform))
+
     def _is_similar(self, other, rtol, atol):
         """ Returns True if `other` model member (which is guaranteed to be the same type as self) has
         the same local structure, i.e., not considering parameter values or submembers """

@@ -857,7 +940,16 @@ def is_equivalent(self, other, rtol=1e-5, atol=1e-8):
         """
         if not self.is_similar(other): return False

-        if not _np.allclose(self.to_vector(), other.to_vector(), rtol=rtol, atol=atol):
+        try:
+            v1 = self.to_vector()
+            v2 = other.to_vector()
+        except RuntimeError as e:
+            if 'to_vector() should never be called' not in str(e):
+                raise e
+            assert type(self) == type(other)
+            v1 = self.to_dense()
+            v2 = other.to_dense()
+        if not _np.allclose(v1, v2, rtol=rtol, atol=atol):
             return False

         # Recursive check on submembers

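The docstring tables above pin down the transform semantics that `residuals` and the two Frobenius-distance helpers build on. A toy rendering for the "operator" category, using plain dense arrays in place of pyGSTi's `OperatorLike` machinery (names here are illustrative, not pyGSTi API):

    import numpy as np

    def to_transformed_dense_operator(X, T_domain, T_codomain):
        # Operator row of the table: op(XT) = T_codomain @ op(X) @ T_domain.
        return T_codomain @ X @ T_domain

    def frobeniusdist(X, Y, T_domain, T_codomain):
        # Mirrors ModelMember.frobeniusdist: norm of the flattened residuals.
        residuals = (to_transformed_dense_operator(X, T_domain, T_codomain) - Y).ravel()
        return np.linalg.norm(residuals)

    # A gauge transform T maps X to inv(T) @ X @ T, so measuring X against a
    # gauge-equivalent Y with the matching (T_domain, T_codomain) pair gives ~0.
    rng = np.random.default_rng(1)
    T = np.eye(4) + 0.1 * rng.standard_normal((4, 4))
    X = rng.standard_normal((4, 4))
    Y = np.linalg.inv(T) @ X @ T

    print(frobeniusdist(X, Y, T_domain=T, T_codomain=np.linalg.inv(T)))  # ~0.0

This invariance is what the gauge optimization in gaugeopt.py exploits when it compares transformed model members against their targets.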