HTML Report Updates #572

Open · wants to merge 25 commits into base: develop
26 changes: 26 additions & 0 deletions README.md
@@ -197,3 +197,29 @@ Questions?
----------
For help and support with pyGSTi, please contact the authors at
pygsti@sandia.gov.

Comment from the contributor (PR author):
I forgot I added this, I think I did so in response to a request from @dhothem. We should probably also include the citation for Zenodo.

How To Cite pyGSTi
------------------

If you've used pyGSTi in your research and are interested in citing
us, please consider the following software design paper from some of the
members of our development team (BibTeX below):

```
@ARTICLE{Nielsen2020-rd,
title = "Probing quantum processor performance with {py{GST}i}",
author = "Nielsen, Erik and Rudinger, Kenneth and Proctor, Timothy and
Russo, Antonio and Young, Kevin and Blume-Kohout, Robin",
journal = "Quantum Sci. Technol.",
publisher = "IOP Publishing",
volume = 5,
number = 4,
pages = "044002",
month = jul,
year = 2020,
url = "https://iopscience.iop.org/article/10.1088/2058-9565/ab8aa4",
copyright = "http://iopscience.iop.org/page/copyright",
issn = "2058-9565",
doi = "10.1088/2058-9565/ab8aa4"
}
```
20 changes: 11 additions & 9 deletions pygsti/algorithms/core.py
@@ -765,19 +765,21 @@ def run_iterative_gst(dataset, start_model, circuit_lists,

models = []
optimums = []
mdc_store_list = []

for i in range(len(circuit_lists)):
#then do the final iteration slightly differently since the generator should
#give an extra return value (the final objective function) on the last pass.
if i==len(circuit_lists)-1:
mdl_iter, opt_iter, final_objfn = next(gst_iter_gen)
mdl_iter, opt_iter, mdc_store_iter, final_objfn = next(gst_iter_gen)
else:
mdl_iter, opt_iter = next(gst_iter_gen)
mdl_iter, opt_iter, mdc_store_iter = next(gst_iter_gen)

models.append(mdl_iter)
optimums.append(opt_iter)
mdc_store_list.append(mdc_store_iter)

return models, optimums, final_objfn
return models, optimums, final_objfn, mdc_store_list

def iterative_gst_generator(dataset, start_model, circuit_lists,
optimizer, iteration_objfn_builders, final_objfn_builders,
@@ -927,7 +929,6 @@ def _max_array_types(artypes_list): # get the maximum number of each array type
first_iter_optimizer = _copy.deepcopy(optimizer) # use a separate copy of optimizer, as it
first_iter_optimizer.fditer = optimizer.first_fditer # is a persistent object (so don't modify!)
opt_result, mdc_store = run_gst_fit(mdc_store, first_iter_optimizer, obj_fn_builder, printer - 1)

else:
opt_result, mdc_store = run_gst_fit(mdc_store, optimizer, obj_fn_builder, printer - 1)
profiler.add_time('run_iterative_gst: iter %d %s-opt' % (i + 1, obj_fn_builder.name), tNxt)
@@ -944,7 +945,6 @@ def _max_array_types(artypes_list): # get the maximum number of each array type
mdl.basis = start_model.basis
opt_result, mdc_store = run_gst_fit(mdc_store, optimizer, obj_fn_builder, printer - 1)
profiler.add_time('run_iterative_gst: final %s opt' % obj_fn_builder.name, tNxt)

tNxt = _time.time()
printer.log("Final optimization took %.1fs\n" % (tNxt - tRef), 2)
tRef = tNxt
@@ -954,11 +954,13 @@ def _max_array_types(artypes_list): # get the maximum number of each array type
# Note: initial_mdc_store is *not* an objective fn (it's just a store) so don't send it back.
if mdc_store is not initial_mdc_store:
final_objfn = mdc_store

yield (mdc_store.model, opt_result, final_objfn)
yield (mdc_store.model, opt_result, mdc_store, final_objfn)
else:
#If not the final iteration then only send back a copy of the model and the optimizer results
yield (mdc_store.model.copy(), opt_result)
#If not the final iteration, then send back a copy of the model and the optimizer results.
#mdc_store gets re-initialized at the start of each circuit-list iteration and doesn't appear
#to get propagated beyond that point, so sending it back directly without copying (which would
#probably require implementing a custom copy method for MDC store objects) should be fairly safe.
yield (mdc_store.model.copy(), opt_result, mdc_store)

printer.log('Iterative GST Total Time: %.1fs' % (_time.time() - tStart))
profiler.add_time('run_iterative_gst: total time', tStart)
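The net effect of the core.py changes is that `iterative_gst_generator` now yields the per-iteration MDC store alongside the model and optimizer result, with the final iteration additionally yielding the final objective function, and `run_iterative_gst` collects those stores into `mdc_store_list`. The following self-contained sketch uses a stand-in generator (`fit_generator` is hypothetical, not part of pyGSTi) to illustrate the unpacking pattern a caller now has to follow:

```
def fit_generator(n_iters):
    """Stand-in for iterative_gst_generator: intermediate iterations yield
    (model, opt_result, mdc_store); the final iteration also yields final_objfn."""
    for i in range(n_iters):
        model, opt_result, mdc_store = f"mdl{i}", f"opt{i}", f"store{i}"
        if i == n_iters - 1:
            yield (model, opt_result, mdc_store, f"objfn{i}")  # extra value on the last iteration
        else:
            yield (model, opt_result, mdc_store)

n_iters = 3
gen = fit_generator(n_iters)
models, optimums, mdc_store_list = [], [], []
final_objfn = None
for i in range(n_iters):
    if i == n_iters - 1:  # final iteration: four return values
        mdl_iter, opt_iter, mdc_store_iter, final_objfn = next(gen)
    else:                 # intermediate iterations: three return values
        mdl_iter, opt_iter, mdc_store_iter = next(gen)
    models.append(mdl_iter)
    optimums.append(opt_iter)
    mdc_store_list.append(mdc_store_iter)

assert len(mdc_store_list) == n_iters and final_objfn == "objfn2"
```

Any existing caller that unpacked three values from `run_iterative_gst` (or two/three per yield from the generator) will need the analogous update.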
4 changes: 4 additions & 0 deletions pygsti/algorithms/gaugeopt.py
@@ -265,6 +265,10 @@ def gaugeopt_custom(model, objective_fn, gauge_group=None,
printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)
tStart = _time.time()

#replace model with a new copy of itself so as not to propagate the conversion back to the
#instance of the model object we are gauge optimizing.
model = model.copy()

if comm is not None:
mdl_cmp = comm.bcast(model if (comm.Get_rank() == 0) else None, root=0)
try:
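The added `model.copy()` is a defensive copy: whatever parameterization conversion `gaugeopt_custom` performs afterwards now mutates a private copy instead of the caller's model instance. A minimal sketch of the same pattern with a toy class (not a pyGSTi model):

```
class ToyModel:
    """Toy stand-in for a model with an in-place conversion method."""
    def __init__(self, parameterization="full"):
        self.parameterization = parameterization
    def copy(self):
        return ToyModel(self.parameterization)
    def convert(self, new_param):
        self.parameterization = new_param  # in-place mutation

def gauge_optimize(model):
    model = model.copy()   # defensive copy, as in the gaugeopt.py change
    model.convert("TP")    # any conversion stays local to this function
    return model

caller_model = ToyModel("full")
optimized = gauge_optimize(caller_model)
assert caller_model.parameterization == "full"  # caller's instance is untouched
assert optimized.parameterization == "TP"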
5 changes: 2 additions & 3 deletions pygsti/circuits/circuitlist.py
@@ -225,8 +225,7 @@ def elementvec_to_array(self, elementvec, layout, mergeop="sum"):
Dictates how to combine the `elementvec` components corresponding to a single
plaquette entry (circuit). If "sum", the returned array contains summed
values. If a format string, e.g. `"%.2f"`, then the so-formatted components
are joined together with separating commas, and the resulting array contains
string (object-type) entries.
are joined together, and the resulting array contains string (object-type) entries.

Returns
-------
@@ -241,7 +240,7 @@ def elementvec_to_array(self, elementvec, layout, mergeop="sum"):
fmt = mergeop
ret = _np.nan * _np.ones(len(self), dtype=_np.object_)
for i,ckt in enumerate(self._circuits):
ret[i] = ", ".join(["NaN" if _np.isnan(x) else
ret[i] = "".join(["NaN" if _np.isnan(x) else
(fmt % x) for x in elementvec[layout.indices(ckt)]])
else:
raise ValueError("Invalid `mergeop` arg: %s" % str(mergeop))
5 changes: 2 additions & 3 deletions pygsti/circuits/circuitstructure.py
@@ -140,8 +140,7 @@ def elementvec_to_array(self, elementvec, layout, mergeop="sum"):
Dictates how to combine the `elementvec` components corresponding to a single
plaquette entry (circuit). If "sum", the returned array contains summed
values. If a format string, e.g. `"%.2f"`, then the so-formatted components
are joined together with separating commas, and the resulting array contains
string (object-type) entries.
are joined, and the resulting array contains string (object-type) entries.

Returns
-------
@@ -155,7 +154,7 @@ def elementvec_to_array(self, elementvec, layout, mergeop="sum"):
fmt = mergeop
ret = _np.nan * _np.ones((self.num_rows, self.num_cols), dtype=_np.object_)
for (i, j), opstr in self.elements.items():
ret[i, j] = ", ".join(["NaN" if _np.isnan(x) else
ret[i, j] = "".join(["NaN" if _np.isnan(x) else
(fmt % x) for x in elementvec[layout.indices(opstr)]])
else:
raise ValueError("Invalid `mergeop` arg: %s" % str(mergeop))
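Both `elementvec_to_array` implementations change in the same way: when `mergeop` is a format string, the formatted components are now concatenated directly rather than joined with ", ". A small self-contained sketch of just that formatting step, with a toy array standing in for `elementvec[layout.indices(...)]`:

```
import numpy as np

fmt = "%.2f"                                # a format-string mergeop
values = np.array([0.1234, np.nan, 2.5])    # toy stand-in for the selected elementvec components

old = ", ".join(["NaN" if np.isnan(x) else (fmt % x) for x in values])  # previous behavior
new = "".join(["NaN" if np.isnan(x) else (fmt % x) for x in values])    # behavior after this change

print(old)  # 0.12, NaN, 2.50
print(new)  # 0.12NaN2.50
```

Any separator now has to come from the format string itself (e.g. `"%.2f "`).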
20 changes: 14 additions & 6 deletions pygsti/modelmembers/operations/lindbladerrorgen.py
@@ -259,15 +259,20 @@ def from_error_generator(cls, errgen_or_dim, parameterization="CPTPLND", element
terms up to some order). `"cterm"` is similar but uses Clifford operation
action on stabilizer states.

state_space : `StateSpace` or castable to `StateSpace`
The state space upon which this error generator acts.
state_space : StateSpace, optional (default None)
StateSpace object to use in the construction of this LindbladErrorgen.
If None, we use the function `pygsti.baseobjs.statespace.default_space_for_dim`
to infer the correct state space from the dimensions of the passed-in
error generator.

Returns
-------
`LindbladErrorgen`
"""
errgen = _np.zeros((errgen_or_dim, errgen_or_dim), 'd') \
if isinstance(errgen_or_dim, (int, _np.int64)) else errgen_or_dim
if isinstance(errgen_or_dim, (int, _np.int64)):
errgen = _np.zeros((errgen_or_dim, errgen_or_dim), 'd')
else:
errgen = errgen_or_dim
return cls._from_error_generator(errgen, parameterization, elementary_errorgen_basis,
mx_basis, truncate, evotype, state_space)

@@ -321,8 +326,11 @@ def from_error_generator_and_blocks(cls, errgen_or_dim, lindblad_coefficient_blo
terms up to some order). `"cterm"` is similar but uses Clifford operation
action on stabilizer states.

state_space : `StateSpace` or castable to `StateSpace`
The state space upon which this error generator acts.
state_space : StateSpace, optional (default None)
StateSpace object to use in the construction of this LindbladErrorgen.
If None, we use the function `pygsti.baseobjs.statespace.default_space_for_dim`
to infer the correct state space from the dimensions of the passed-in
error generator.

Returns
-------
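The docstring updates document behavior `from_error_generator` (and `from_error_generator_and_blocks`) already had: the first argument may be either an integer dimension, which produces an all-zero error generator of that shape, or an explicit matrix, and the default `state_space=None` is inferred via `default_space_for_dim`. A hedged usage sketch; the import path is taken from the diff header and should be checked against your pyGSTi version:

```
import numpy as np
# Module path per the file changed in this diff; some pyGSTi versions also
# re-export the class from pygsti.modelmembers.operations.
from pygsti.modelmembers.operations.lindbladerrorgen import LindbladErrorgen

# Integer dimension: builds a zero error generator of that shape, and the
# default state_space=None is then inferred from that dimension.
eg_from_dim = LindbladErrorgen.from_error_generator(4, parameterization="CPTPLND")

# Explicit matrix: used directly as the error generator.
eg_from_mat = LindbladErrorgen.from_error_generator(np.zeros((4, 4), 'd'),
                                                    parameterization="CPTPLND")
```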
8 changes: 6 additions & 2 deletions pygsti/protocols/estimate.py
@@ -165,6 +165,8 @@ def __init__(self, parent, models=None, parameters=None, extra_parameters=None):
self._final_objfn_cache = parameters.get('final_objfn_cache', None)
self.final_objfn_builder = parameters.get('final_objfn_builder', _objfns.PoissonPicDeltaLogLFunction.builder())
self._final_objfn = parameters.get('final_objfn', None)
self._per_iter_mdc_store = parameters.get('per_iter_mdc_store', None)


self.extra_parameters = extra_parameters if (extra_parameters is not None) else {}

@@ -193,20 +195,22 @@ def __init__(self, parent, models=None, parameters=None, extra_parameters=None):
'_final_objfn_cache': 'dir-serialized-object',
'final_objfn_builder': 'serialized-object',
'_final_objfn': 'reset',
'_gaugeopt_suite': 'serialized-object'
'_gaugeopt_suite': 'serialized-object',
'_per_iter_mdc_store': 'reset'
}

@property
def parameters(self):
#HACK for now, until we can remove references that access these parameters
parameters = _collections.OrderedDict()
parameters = dict()
parameters['protocol'] = self.protocol # Estimates can hold sub-Protocols <=> sub-results
parameters['profiler'] = self.profiler
parameters['final_mdc_store'] = self._final_mdc_store
parameters['final_objfn'] = self._final_objfn
parameters['final_objfn_cache'] = self._final_objfn_cache
parameters['final_objfn_builder'] = self.final_objfn_builder
parameters['weights'] = self.circuit_weights
parameters['per_iter_mdc_store'] = self._per_iter_mdc_store
parameters.update(self.extra_parameters)
#parameters['raw_objective_values']
#parameters['model_test_values']
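With these changes an `Estimate` exposes the per-iteration MDC stores through its `parameters` property under the key `'per_iter_mdc_store'`, while the underlying `_per_iter_mdc_store` attribute is tagged `'reset'` and therefore not persisted when the estimate is serialized. A hedged access sketch; `estimate` is assumed to be an `Estimate` from a completed GST run (e.g. one entry of `results.estimates`):

```
# The key name comes from the parameters property shown in this diff.
per_iter_stores = estimate.parameters.get('per_iter_mdc_store', None)

if per_iter_stores is None:
    # e.g. the estimate was written to disk and reloaded: the 'reset' tag means
    # the stores are dropped, so report code must be able to rebuild them.
    print("Per-iteration MDC stores unavailable; they must be rebuilt if needed.")
else:
    print(f"{len(per_iter_stores)} per-iteration MDC stores available.")
```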
17 changes: 12 additions & 5 deletions pygsti/protocols/gst.py
@@ -1357,6 +1357,7 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
if disable_checkpointing:
seed_model = mdl_start.copy()
mdl_lsgst_list = []
mdc_store_list = []
starting_idx = 0
else:
# Set the checkpoint_path variable if None
@@ -1376,6 +1377,7 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
if checkpoint is None:
seed_model = mdl_start.copy()
mdl_lsgst_list = []
mdc_store_list = []
checkpoint = GateSetTomographyCheckpoint()
elif isinstance(checkpoint, GateSetTomographyCheckpoint):
# if the checkpoint's last completed iteration is non-negative
@@ -1393,8 +1395,12 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
# left set to None. There looks to be some logic for handling this and it looks
# like the serialization routines effectively do this already, as the value
# of this is lost between writing and reading.
mdc_store_list = [None]*len(mdl_lsgst_list) #We don't presently have serialization support for
#MDC store objects, so for now we skip serializing them and re-initialize previous iterations
#to None. Given how this is currently used, the only downside should be the inefficiency of
#rebuilding the needed MDC stores during report generation.
else:
NotImplementedError(
raise NotImplementedError(
'The only currently valid checkpoint inputs are None and GateSetTomographyCheckpoint.')

# note the last_completed_iter value is initialized to -1 so the below line
@@ -1420,10 +1426,11 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
#then do the final iteration slightly differently since the generator should
#give an extra return value (the final objective function) on the last pass.
if i==len(bulk_circuit_lists)-1:
mdl_iter, opt_iter, final_objfn = next(gst_iter_generator)
mdl_iter, opt_iter, mdc_store_iter, final_objfn = next(gst_iter_generator)
else:
mdl_iter, opt_iter = next(gst_iter_generator)
mdl_iter, opt_iter, mdc_store_iter = next(gst_iter_generator)
mdl_lsgst_list.append(mdl_iter)
mdc_store_list.append(mdc_store_iter)
optima_list.append(opt_iter)

if not disable_checkpointing:
@@ -1436,15 +1443,15 @@ def run(self, data, memlimit=None, comm=None, checkpoint=None, checkpoint_path=N
checkpoint.write(f'{checkpoint_path}_iteration_{i}.json')

tnxt = _time.time(); profiler.add_time('GST: total iterative optimization', tref); tref = tnxt

#set parameters
parameters = _collections.OrderedDict()
parameters = dict()
parameters['protocol'] = self # Estimates can hold sub-Protocols <=> sub-results
parameters['final_objfn_builder'] = self.objfn_builders.final_builders[-1] \
if len(self.objfn_builders.final_builders) > 0 else self.objfn_builders.iteration_builders[-1]
parameters['final_objfn'] = final_objfn # Final obj. function evaluated at best-fit point (cache too)
parameters['final_mdc_store'] = final_objfn # Final obj. function is also a "MDC store"
parameters['profiler'] = profiler
parameters['per_iter_mdc_store'] = mdc_store_list #list of the MDC stores for each iteration.
# Note: we associate 'final_cache' with the Estimate, which means we assume that *all*
# of the models in the estimate can use same evaltree, have the same default prep/POVMs, etc.

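One small fix here deserves emphasis: the previous code constructed `NotImplementedError(...)` without `raise`, which is a silent no-op, so an unsupported checkpoint object would fall through instead of failing. A minimal self-contained illustration (a plain `dict` stands in for `GateSetTomographyCheckpoint`):

```
def validate_checkpoint_old(checkpoint):
    if checkpoint is not None and not isinstance(checkpoint, dict):
        NotImplementedError("unsupported checkpoint type")  # constructed but never raised: no-op

def validate_checkpoint_new(checkpoint):
    if checkpoint is not None and not isinstance(checkpoint, dict):
        raise NotImplementedError("unsupported checkpoint type")

validate_checkpoint_old(object())      # silently does nothing (the old behavior)
try:
    validate_checkpoint_new(object())  # now fails loudly, as intended
except NotImplementedError as exc:
    print("caught:", exc)
```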
15 changes: 13 additions & 2 deletions pygsti/report/colormaps.py
@@ -20,8 +20,19 @@
def _vnorm(x, vmin, vmax):
#Perform linear mapping from [vmin,vmax] to [0,1]
# (which is just a *part* of the full mapping performed)
if _np.isclose(vmin, vmax): return _np.ma.zeros(x.shape, 'd')
return _np.clip((x - vmin) / (vmax - vmin), 0.0, 1.0)
if abs(vmin - vmax) < (1e-8 + 1e-5*abs(vmax)): #inlined previous np.isclose call
return _np.ma.zeros(x.shape, 'd')
#In versions of numpy from 1.17-1.24 the function np.clip
#had an issue that caused it to run much slower than it should.
#See: https://github.com/numpy/numpy/issues/14281
#The workaround at that time was to use this function (which in the
#stated versions is 10x faster).
#This has since been patched in 1.25+, but not everyone
#is or can be on a version that recent, and I don't see any
#downside to using the version from core.umath for now.
#TODO: switch back to np.clip once we're confident most users are
#on 1.25+
return _np.core.umath.clip((x - vmin) / (vmax - vmin), 0.0, 1.0)


@smart_cached
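`_vnorm` is a clipped linear rescale of `[vmin, vmax]` onto `[0, 1]` with a guard for a degenerate interval. The sketch below reproduces that logic with `np.clip`; the PR swaps in `numpy.core.umath.clip` purely as a speed workaround for NumPy 1.17-1.24 (see numpy issue 14281), with identical results:

```
import numpy as np

def vnorm_sketch(x, vmin, vmax):
    """Clipped linear map of [vmin, vmax] onto [0, 1], mirroring _vnorm's logic."""
    if abs(vmin - vmax) < (1e-8 + 1e-5 * abs(vmax)):  # degenerate-interval guard (inlined isclose)
        return np.ma.zeros(x.shape, 'd')
    # np.clip gives the same result as numpy.core.umath.clip; only the speed
    # differs on the affected NumPy versions.
    return np.clip((x - vmin) / (vmax - vmin), 0.0, 1.0)

x = np.array([-1.0, 0.0, 0.5, 1.0, 2.0])
print(vnorm_sketch(x, 0.0, 1.0))  # [0.  0.  0.5 1.  1. ]
print(vnorm_sketch(x, 1.0, 1.0))  # all zeros when vmin ~= vmax
```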