Skip to content

Commit b958304

Browse files
committed
change datasetconstruction.mix_datasets. Leave comments about efficiency elsewhere.
1 parent fb6ddc6 commit b958304

File tree

3 files changed

+28
-3
lines changed

3 files changed

+28
-3
lines changed

pygsti/data/datasetconstruction.py

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -209,14 +209,27 @@ def simulate_data(model_or_dataset, circuit_list, num_samples,
209209
return dataset
210210

211211

212-
def mix_datasets(dsa, dsb, p, integral=True):
212+
def mix_datasets(dsa, dsb, p, integral=True, choose_machine=False, seed=None):
213213
dsc = dsa.copy_nonstatic()
214214
# arr = _np.array(dsc.repData).ravel()
215215
# print((arr, arr.size))
216216
# print((dsb.repData, dsb.repData.size))
217+
num_circuits = len(dsb)
218+
if choose_machine:
219+
if seed is None:
220+
_warnings.warn('Set the random seed! Using 42.')
221+
seed = 42
222+
rngstate = _np.random.default_rng(seed)
223+
interp_weights = rngstate.uniform(low=0, high=1, size=num_circuits)
224+
interp_weights[interp_weights < p] = 0.0
225+
interp_weights[interp_weights > 0] = 1.0
226+
else:
227+
interp_weights = p * _np.ones(num_circuits)
228+
217229
for i, (_, dsrow) in enumerate(dsb.items()):
218-
interpolated = p*dsc.repData[i] + (1-p)*dsrow.reps
219-
if integral:
230+
p_i = interp_weights[i]
231+
interpolated = p_i * dsc.repData[i] + (1-p_i) * dsrow.reps
232+
if integral and (not choose_machine):
220233
assert interpolated.size == 2
221234
total = int(_np.ceil((_np.sum(interpolated))))
222235
j = _np.argmin(interpolated)

pygsti/objectivefns/objectivefns.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4597,6 +4597,10 @@ def _dterms_fill_penalty(self, paramvec, terms_jac):
45974597
# Ideally we'd have put in the effort to do this earlier when doing finite-difference to get
45984598
# the Jacobian of terms. But we start with a simple implementation.
45994599
#
4600+
# --> TODO: change so that if callable_penalty has a grad method then we'll call it instead
4601+
# of relying on finite-differences over all model parameters. This would help when
4602+
# the penalty is constant w.r.t. some params (like those that only appear in SPAM).
4603+
#
46004604
terms_jac[off:off+1, :] = 0.0
46014605
if self.callable_penalty_factor:
46024606
vec0 = self.model.to_vector()

pygsti/tools/rbtheory.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -323,6 +323,14 @@ def L_matrix(model, target_model, weights=None): # noqa N802
323323
weights[key] = 1.
324324

325325
normalizer = _np.sum(_np.array([weights[key] for key in list(target_model.operations.keys())]))
326+
# TODO: improve efficiency
327+
#
328+
# 1. Accumulate the summands in this list comprehension in-place. (Might already happen but that's non-obvious)
329+
# 2. Use the fact that target gates are unitary and so their superoperator representation inverses should
330+
# be their transposes.
331+
# 3. Have the option to return this matrix as an implicit abstract linear operator, so that anyone who wants
332+
# eigenvalue info can try to get it from an iterative method instead of a full eigendecomposition.
333+
#
326334
L_matrix = (1 / normalizer) * _np.sum(
327335
weights[key] * _np.kron(
328336
model.operations[key].to_dense(on_space='HilbertSchmidt').T,

0 commit comments

Comments
 (0)