From 606704d4d7609b7a7ab0ecf9ceaf25966a49efb5 Mon Sep 17 00:00:00 2001 From: frosati1 Date: Wed, 15 Jun 2022 14:47:24 +0200 Subject: [PATCH 01/14] ts calculated in Exp and returning dynamics's generators from model --- c3/experiment.py | 13 ++++++- c3/libraries/propagation.py | 71 +++++++++---------------------------- c3/model.py | 41 +++++++++++++++++++++ 3 files changed, 70 insertions(+), 55 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index 21e49c2e..3d12de94 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -503,7 +503,18 @@ def compute_propagators(self): ) U = result["U"] dUs = result["dUs"] - self.ts = result["ts"] + signal = generator.generate_signals(instr) + ts_list = [sig["ts"][1:] for sig in signal.values()] + ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) + if not np.all( + tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) + ): + raise Exception("C3Error:Something with the times happend.") + if not np.all( + tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore + ): + raise Exception("C3Error:Something with the times happend.") + self.ts = ts if model.use_FR: # TODO change LO freq to at the level of a line freqs = {} diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index e711788a..df796f6b 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -242,37 +242,12 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - """ signal = gen.generate_signals(instr) # Why do I get 0.0 if I print gen.resolution here?! FR - ts = [] - if model.controllability: - h0, hctrls = model.get_Hamiltonians() - signals = [] - hks = [] - for key in signal: - signals.append(signal[key]["values"]) - ts = signal[key]["ts"] - hks.append(hctrls[key]) - signals = tf.cast(signals, tf.complex128) - hks = tf.cast(hks, tf.complex128) - else: - h0 = model.get_Hamiltonian(signal) - ts_list = [sig["ts"][1:] for sig in signal.values()] - ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) - hks = None - signals = None - if not np.all( - tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) - ): - raise Exception("C3Error:Something with the times happend.") - if not np.all( - tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore - ): - raise Exception("C3Error:Something with the times happend.") - - dt = ts[1] - ts[0] - - batch_size = tf.constant(len(h0), tf.int32) - - dUs = tf_batch_propagate(h0, hks, signals, dt, batch_size=batch_size) + + dynamics_generators = model.get_dynamics_generators(signal) + + batch_size = tf.constant(len(dynamics_generators[0]), tf.int32) + + dUs = tf_batch_propagate(dynamics_generators, batch_size=batch_size) # U = tf_matmul_left(tf.cast(dUs, tf.complex128)) U = tf_matmul_n(dUs, folding_stack) @@ -281,7 +256,7 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128))) dUs = tf.vectorized_map(model.blowup_excitations, dUs) - return {"U": U, "dUs": dUs, "ts": ts} + return {"U": U, "dUs": dUs} #################### @@ -400,7 +375,7 @@ def pwc_trott_drift(h0, hks, cflds_t, dt): return dUs -def tf_batch_propagate(hamiltonian, hks, signals, dt, batch_size): +def tf_batch_propagate(dyn_gens, batch_size): """ Propagate signal in batches Parameters @@ -420,32 +395,20 @@ def tf_batch_propagate(hamiltonian, hks, signals, dt, batch_size): ------- """ - if signals is not None: - batches = 
int(tf.math.ceil(signals.shape[0] / batch_size)) - batch_array = tf.TensorArray( - signals.dtype, size=batches, dynamic_size=False, infer_shape=False - ) - for i in range(batches): - batch_array = batch_array.write( - i, signals[i * batch_size : i * batch_size + batch_size] - ) - else: - batches = int(tf.math.ceil(hamiltonian.shape[0] / batch_size)) - batch_array = tf.TensorArray( - hamiltonian.dtype, size=batches, dynamic_size=False, infer_shape=False + + batches = int(tf.math.ceil(dyn_gens.shape[0] / batch_size)) + batch_array = tf.TensorArray( + dyn_gens.dtype, size=batches, dynamic_size=False, infer_shape=False + ) + for i in range(batches): + batch_array = batch_array.write( + i, dyn_gens[i * batch_size : i * batch_size + batch_size] ) - for i in range(batches): - batch_array = batch_array.write( - i, hamiltonian[i * batch_size : i * batch_size + batch_size] - ) dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False) for i in range(batches): x = batch_array.read(i) - if signals is not None: - result = tf_propagation_vectorized(hamiltonian, hks, x, dt) - else: - result = tf_propagation_vectorized(x, None, None, dt) + result = tf.linalg.expm(x) dUs_array = dUs_array.write(i, result) return dUs_array.concat() diff --git a/c3/model.py b/c3/model.py index 432bc2b8..cbd195a7 100755 --- a/c3/model.py +++ b/c3/model.py @@ -378,6 +378,47 @@ def get_Hamiltonian(self, signal=None): return signal_hamiltonian + def get_Liouvillian(self, signal): + h = self.get_Hamiltonian() + col_ops = self.get_Lindbladians() + if self.max_excitations: + col_ops = self.cut_excitations(col_ops) + + coher_superop = -1j * (tf_utils.tf_spre(h) - tf_utils.tf_spost(h)) + for col_op in col_ops: + super_clp = tf.matmul( + tf_utils.tf_spre(col_op), + tf_utils.tf_spost(tf.linalg.adjoint(col_op)), + ) + anticomm_L_clp = 0.5 * tf.matmul( + tf_utils.tf_spre(tf.linalg.adjoint(col_op)), + tf_utils.tf_spre(col_op), + ) + anticomm_R_clp = 0.5 * tf.matmul( + tf_utils.tf_spost(col_op), + tf_utils.tf_spost(tf.linalg.adjoint(col_op)), + ) + liouvillian = coher_superop + super_clp - anticomm_L_clp - anticomm_R_clp + return liouvillian + + def get_dynamics_generators(self, signal): + """Returns Tensor of Hamiltonians if model.lindbladian is False, + otherwise it returns as a Tensor of superoperators + the Liouvillians solving the Lindblad Master Equation + """ + if self.lindbladian: + dyn_gens = self.get_Liouvillian(signal) + else: + dyn_gens = -1j * self.get_Hamiltonian(signal) + + ts = [] + ts_list = [sig["ts"][1:] for sig in signal.values()] + ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) + + dt = tf.cast(ts[1] - ts[0], dtype=tf.complex128) + + return dyn_gens * dt + def get_sparse_Hamiltonian(self, signal=None): return self.blowup_excitations(self.get_Hamiltonian(signal)) From efc500b7f2b28f44c076ff0fc17d12780d791fa5 Mon Sep 17 00:00:00 2001 From: frosati1 Date: Thu, 16 Jun 2022 00:22:11 +0200 Subject: [PATCH 02/14] ts adjusted, propagation still bugged --- c3/libraries/propagation.py | 18 ++++++++---------- c3/model.py | 9 ++++++++- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index df796f6b..d2bc6777 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -380,20 +380,18 @@ def tf_batch_propagate(dyn_gens, batch_size): Propagate signal in batches Parameters ---------- - hamiltonian: tf.tensor - Drift Hamiltonian - hks: Union[tf.tensor, List[tf.tensor]] - List of control hamiltonians - signals: 
Union[tf.tensor, List[tf.tensor]] - List of control signals, one per control hamiltonian - dt: float - Length of one time slice + dyn_gens: tf.tensor + i) -1j * Hamiltonian(t) * dt + or + ii) -1j * Liouville superoperator * dt batch_size: int Number of elements in one batch Returns ------- - + List of partial propagators + i) as operators + ii) as superoperators """ batches = int(tf.math.ceil(dyn_gens.shape[0] / batch_size)) @@ -408,7 +406,7 @@ def tf_batch_propagate(dyn_gens, batch_size): dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False) for i in range(batches): x = batch_array.read(i) - result = tf.linalg.expm(x) + result = tf.linalg.expm(tf.cast(x, dtype=tf.complex128)) dUs_array = dUs_array.write(i, result) return dUs_array.concat() diff --git a/c3/model.py b/c3/model.py index cbd195a7..83796d1d 100755 --- a/c3/model.py +++ b/c3/model.py @@ -414,7 +414,14 @@ def get_dynamics_generators(self, signal): ts = [] ts_list = [sig["ts"][1:] for sig in signal.values()] ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) - + if not np.all( + tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) + ): + raise Exception("C3Error:Something with the times happend.") + if not np.all( + tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore + ): + raise Exception("C3Error:Something with the times happend.") dt = tf.cast(ts[1] - ts[0], dtype=tf.complex128) return dyn_gens * dt From 2c8bd4ada995badc6f4321a7391ea815b187b9f4 Mon Sep 17 00:00:00 2001 From: Moritz Schu Date: Wed, 13 Jul 2022 16:31:29 +0200 Subject: [PATCH 03/14] removed batched processing of partial propagators and fixed failing tests by only executing them in eager mode --- c3/experiment.py | 8 +++++--- c3/libraries/propagation.py | 6 +++--- c3/model.py | 10 ++++++---- c3/optimizers/optimalcontrol.py | 2 +- test/test_quantity.py | 6 ++++-- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index 3d12de94..bb914c3e 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -505,12 +505,14 @@ def compute_propagators(self): dUs = result["dUs"] signal = generator.generate_signals(instr) ts_list = [sig["ts"][1:] for sig in signal.values()] - ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) - if not np.all( + ts = tf.math.reduce_mean(ts_list, axis=0) + #Only do the safety check outside of graph mode for performance reasons. + #When using graph mode, the safety check will still be executed ONCE during tracing + if tf.executing_eagerly() and not tf.reduce_all( tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) ): raise Exception("C3Error:Something with the times happend.") - if not np.all( + if tf.executing_eagerly() and not tf.reduce_all( tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore ): raise Exception("C3Error:Something with the times happend.") diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index d2bc6777..eec5dd4f 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -242,12 +242,12 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - """ signal = gen.generate_signals(instr) # Why do I get 0.0 if I print gen.resolution here?! 
FR + #print(signal) dynamics_generators = model.get_dynamics_generators(signal) + #print("dynamics_generators",dynamics_generators) # these are the hamiltonians - batch_size = tf.constant(len(dynamics_generators[0]), tf.int32) - - dUs = tf_batch_propagate(dynamics_generators, batch_size=batch_size) + dUs = tf.linalg.expm(dynamics_generators) # U = tf_matmul_left(tf.cast(dUs, tf.complex128)) U = tf_matmul_n(dUs, folding_stack) diff --git a/c3/model.py b/c3/model.py index 83796d1d..cacc218c 100755 --- a/c3/model.py +++ b/c3/model.py @@ -413,12 +413,14 @@ def get_dynamics_generators(self, signal): ts = [] ts_list = [sig["ts"][1:] for sig in signal.values()] - ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0)) - if not np.all( - tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) + ts = tf.math.reduce_mean(ts_list, axis=0) + #Only do the safety check outside of graph mode for performance reasons. + #When using graph mode, the safety check will still be executed ONCE during tracing + if tf.executing_eagerly() and not tf.reduce_all( + tf.math.reduce_variance(ts_list, axis=0) < (1e-5 * (ts[1] - ts[0])) ): raise Exception("C3Error:Something with the times happend.") - if not np.all( + if tf.executing_eagerly() and not tf.reduce_all( tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore ): raise Exception("C3Error:Something with the times happend.") diff --git a/c3/optimizers/optimalcontrol.py b/c3/optimizers/optimalcontrol.py index 44a8b706..382da512 100755 --- a/c3/optimizers/optimalcontrol.py +++ b/c3/optimizers/optimalcontrol.py @@ -156,7 +156,7 @@ def optimize_controls(self, setup_log: bool = True) -> None: self.load_best(self.logdir + "best_point_" + self.logname) self.end_log() - @tf.function + @tf.function(autograph=True) def goal_run(self, current_params: tf.Tensor) -> tf.float64: """ Evaluate the goal function for current parameters. 
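The guarded checks introduced above rely on how tf.function handles Python code: statements that are not TensorFlow ops run only while the function is traced, once per input signature, while graph ops run on every call. A minimal stand-alone sketch of that behaviour (an illustration, not part of this patch):

    import tensorflow as tf

    @tf.function
    def f(x):
        print("traced")       # Python side effect: runs only during tracing
        tf.print("executed")  # graph op: runs on every call
        return x * 2

    f(tf.constant(1.0))  # prints "traced" then "executed"
    f(tf.constant(2.0))  # same signature, no retrace: prints only "executed"

This is why the variance checks above are wrapped in tf.executing_eagerly(): once the graph is built they cost nothing.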
diff --git a/test/test_quantity.py b/test/test_quantity.py index 2317b3fe..b2993598 100644 --- a/test/test_quantity.py +++ b/test/test_quantity.py @@ -4,6 +4,7 @@ import numpy as np import pytest from c3.c3objs import Quantity, hjson_encode +import tensorflow as tf amp = Quantity(value=0.0, min_val=-1.0, max_val=+1.0, unit="V") amp_dict = { @@ -157,10 +158,11 @@ def test_qty_math() -> None: assert a * b == 1.0 assert b * a == 1.0 np.testing.assert_allclose(a**b, 0.25) - assert b**a == 2**0.5 + np.testing.assert_allclose(b**a, 2**0.5) np.testing.assert_allclose(a / b, 0.25) assert b / a == 4.0 - assert b % a == 0 + with tf.device("/cpu:0"): + assert b % a == 0 qty = Quantity(3, min_val=0, max_val=5) qty.subtract(1.3) From a1944aba76cebd841d318de0f9da96281a7da464 Mon Sep 17 00:00:00 2001 From: Moritz Schu Date: Fri, 15 Jul 2022 16:10:40 +0200 Subject: [PATCH 04/14] fixed lindbladian and added new propagation methods --- c3/experiment.py | 16 +- c3/libraries/chip.py | 3 +- c3/libraries/propagation.py | 207 +++++++++++++++++++++++++- c3/model.py | 24 +-- examples/single_qubit_blackbox_exp.py | 9 +- 5 files changed, 237 insertions(+), 22 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index bb914c3e..b57d95d4 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -498,11 +498,15 @@ def compute_propagators(self): model.controllability = self.use_control_fields steps = int((instr.t_end - instr.t_start) * self.sim_res) - result = self.propagation( - model, generator, instr, self.folding_stack[steps] - ) + if self.propagation is unitary_provider["pwc"]: + result = self.propagation( + model, generator, instr, self.folding_stack[steps] + ) + else: + result = self.propagation( + model, generator, instr, + ) U = result["U"] - dUs = result["dUs"] signal = generator.generate_signals(instr) ts_list = [sig["ts"][1:] for sig in signal.values()] ts = tf.math.reduce_mean(ts_list, axis=0) @@ -559,7 +563,9 @@ def compute_propagators(self): dephasing_channel = model.get_dephasing_channel(t_final, amps) U = tf.matmul(dephasing_channel, U) propagators[gate] = U - partial_propagators[gate] = dUs + if self.propagation is unitary_provider["pwc"] or \ + self.propagation is unitary_provider["pwc_sequential_parallel"]: + partial_propagators[gate] = result["dUs"] # TODO we might want to move storing of the propagators to the instruction object if self.overwrite_propagators: diff --git a/c3/libraries/chip.py b/c3/libraries/chip.py index e55c3c60..94a48e57 100755 --- a/c3/libraries/chip.py +++ b/c3/libraries/chip.py @@ -1234,5 +1234,6 @@ def get_Hamiltonian( return h elif isinstance(signal, dict): sig = tf.cast(signal["values"], tf.complex128) - sig = tf.reshape(sig, [sig.shape[0], 1, 1]) + #sig = tf.reshape(sig, [sig.shape[0], 1, 1]) + sig = tf.reshape(sig, [tf.shape(sig)[0], 1, 1]) return tf.expand_dims(h, 0) * sig diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index eec5dd4f..9c6a06cd 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -11,6 +11,7 @@ tf_matmul_n, tf_spre, tf_spost, + Id_like, ) unitary_provider = dict() @@ -241,11 +242,9 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - Matrix representation of the gate. """ signal = gen.generate_signals(instr) - # Why do I get 0.0 if I print gen.resolution here?! FR - #print(signal) + # Why do I get 0.0 if I rint gen.resolution here?! 
FR
-    #print(signal)
 
     dynamics_generators = model.get_dynamics_generators(signal)
-    #print("dynamics_generators",dynamics_generators) # these are the hamiltonians
 
     dUs = tf.linalg.expm(dynamics_generators)
 
@unitary_deco
+@tf.function
def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict:
    """
    Solve the equation of motion (Lindblad or Schrödinger) for a given control
    signal and Hamiltonians.

    Parameters
    ----------
    signal: dict
        Waveform of the control signal per drive line.
    gate: str
        Identifier for one of the gates.

    Returns
    -------
    unitary
        Matrix representation of the gate.
    """
    signal = gen.generate_signals(instr)
    #get number of time steps in the signal
    n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32)

    if model.lindbladian:
        U = Id_like(model.get_Liouvillian(None))
    else:
        U = Id_like(model.get_system_Hamiltonian())

    for i in range(n):
        mini_signal = {}
        for key in signal.keys():
            mini_signal[key] = {}
            mini_signal[key]['values'] = tf.expand_dims(signal[key]['values'][i],0)
            #the ts are only used to compute dt and therefore this works
            mini_signal[key]['ts'] = signal[key]['ts'][0:2]
        dynamics_generators = model.get_dynamics_generators(mini_signal)

        dU = tf.linalg.expm(dynamics_generators)
        #I made sure that this order produces the same result as the original pwc function
        U = dU[0] @ U

    if model.max_excitations:
        U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128)))
        dUs = tf.vectorized_map(model.blowup_excitations, dUs)

    return {"U": U}


@unitary_deco
@tf.function
def pwc_sequential_parallel(
    model: Model,
    gen: Generator,
    instr: Instruction,
    parallel: tf.int32 = tf.constant(16,tf.int32),
    ) -> Dict:
    """
    Solve the equation of motion (Lindblad or Schrödinger) for a given control
    signal and Hamiltonians.

    Parameters
    ----------
    signal: dict
        Waveform of the control signal per drive line.
    gate: str
        Identifier for one of the gates.
    parallel: tf.int32:
        number of parallelly executing matrix multiplications

    Returns
    -------
    unitary
        Matrix representation of the gate.
    """
    signal = gen.generate_signals(instr)
    #get number of time steps in the signal. Since this should always be the same,
    #it does not interfere with tf.function tracing
    n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32)

    batch_size = tf.cast(tf.math.ceil(tf.math.divide(n,parallel)),tf.int32)

    #I tried to set the Us to None at the beginning and have an if-else condition
    #that handles the first call, but tensorflow complained
    if model.lindbladian:
        Us = tf.eye(model.get_Liouvillian().shape[1],batch_shape=[parallel],dtype=tf.complex128)
    else:
        Us = tf.eye(model.get_system_Hamiltonian().shape[1],batch_shape=[parallel],dtype=tf.complex128)

    #batch_size is the number of operations happening sequentially, parallel is the number
    #of operations happening in parallel. Their product is the total number of operations.
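    #A worked illustration with hypothetical sizes (added for clarity, not part of
    #the original code): with n = 10 time steps and parallel = 4, batch_size is
    #ceil(10 / 4) = 3, and the strided slice values[i::batch_size] used below picks
    #    i = 0 -> steps 0, 3, 6, 9
    #    i = 1 -> steps 1, 4, 7
    #    i = 2 -> steps 2, 5, 8
    #so slot k of Us accumulates the contiguous steps 3k, 3k + 1, 3k + 2 (slot 3
    #only sees step 9); the shorter slices are padded with identities below.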
+    for i in range(batch_size):
        mini_signal = {}
        for key in signal.keys():
            mini_signal[key] = {}
            #the signals pulled here are not in sequence, but that should not matter
            #the multiplication of the relevant propagators is still ordered correctly
            mini_signal[key]['values'] = signal[key]['values'][i::batch_size]
            #the ts are only used to compute dt and therefore this works
            mini_signal[key]['ts'] = signal[key]['ts'][0:2]
        dynamics_generators = model.get_dynamics_generators(mini_signal)

        dUs = tf.linalg.expm(dynamics_generators)
        #the last slice of dUs might be differently sized. In that case fill the missing space with eye
        #While loops are inefficient, but this only needs to be called once at the end.
        while tf.shape(Us)[0] > tf.shape(dUs)[0]:
            dUs = tf.concat([
                dUs,
                tf.eye(
                    Us.shape[1],
                    #batch_shape=[Us.shape[0] - dUs.shape[0]], #if this worked, the while could be an if
                    batch_shape=[1],
                    dtype=tf.complex128),],
                axis=0,
                )
        #I made sure that this order produces the same result as the original pwc function
        Us = dUs @ Us

    #The Us are partially accumulated propagators, multiplying them together
    #yields the final propagator. They serve a similar function as the dUs
    #but there are typically fewer of them
    U = tf_matmul_left(tf.cast(Us, tf.complex128))

    if model.max_excitations:
        U = model.blowup_excitations(tf_matmul_left(tf.cast(dU, tf.complex128)))
        Us = tf.vectorized_map(model.blowup_excitations, dU)

    return {"U": U, "dUs": Us}


@unitary_deco
@tf.function
def pwc_sequential_parallel2(
    model: Model,
    gen: Generator,
    instr: Instruction,
    parallel: tf.int32 = tf.constant(16,tf.int32),
    ) -> Dict:
    """
    Solve the equation of motion (Lindblad or Schrödinger) for a given control
    signal and Hamiltonians.

    Parameters
    ----------
    signal: dict
        Waveform of the control signal per drive line.
    gate: str
        Identifier for one of the gates.
    parallel: tf.int32:
        number of parallelly executing matrix multiplications

    Returns
    -------
    unitary
        Matrix representation of the gate.
+ """ + signal = gen.generate_signals(instr) + #get number of time steps in the signal + n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32) + + batch_size = tf.cast(tf.math.ceil(tf.math.divide(n,parallel)),tf.int32) + tf.print("batch_size", batch_size ) + tf.print("n", n ) + + ns = tf.TensorArray(tf.int32,parallel + 1,infer_shape = True) + for i in range(parallel + 1): + ns = ns.write(i,tf.minimum(i*batch_size,n)) + Us = tf.TensorArray(tf.complex128,parallel,infer_shape = True) + if model.lindbladian: + for i in range(parallel): + Us = Us.write(i,Id_like(model.get_Liouvillian(None))) + else: + for i in range(parallel): + Us = Us.write(i,Id_like(model.get_system_Hamiltonian(None))) + + #tf function should parallelize this first loop + with tf.device("/cpu:0"): + for j in range(parallel): + tf.print(j) + for i in range(ns.read(j),ns.read(j+1)): + mini_signal = {} + for key in signal.keys(): + mini_signal[key] = {} + mini_signal[key]['values'] = tf.expand_dims(signal[key]['values'][i],0) + #the ts are only used to compute dt and therefore this works + mini_signal[key]['ts'] = signal[key]['ts'][0:2] + dynamics_generators = model.get_dynamics_generators(mini_signal) + + dU = tf.linalg.expm(dynamics_generators) + #accumulate propagator + Us = Us.write(j,Us.read(j) @ dU[0]) + + U = Us.read(0) + for j in range(1,parallel): + U = U @ Us.read(j) + + if model.max_excitations: + U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128))) + dUs = tf.vectorized_map(model.blowup_excitations, dUs) + + return {"U": U} + + #################### # HELPER FUNCTIONS # #################### diff --git a/c3/model.py b/c3/model.py index cacc218c..1e307e4e 100755 --- a/c3/model.py +++ b/c3/model.py @@ -51,8 +51,8 @@ def __init__(self, subsystems=None, couplings=None, tasks=None, max_excitations= self.tasks: dict = dict() self.drift_ham = None self.dressed_drift_ham = None - self.__hamiltonians = None - self.__dressed_hamiltonians = None + self._hamiltonians = None + self._dressed_hamiltonians = None if subsystems: self.set_components(subsystems, couplings, max_excitations) if tasks: @@ -338,7 +338,7 @@ def get_sparse_Hamiltonians(self): sparse_controls = tf.vectorized_map(self.blowup_excitations, controls) return sparse_drift, sparse_controls - def get_Hamiltonian(self, signal=None): + def get_system_Hamiltonian(self, signal=None): """Get a hamiltonian with an optional signal. This will return an hamiltonian over time. Can be used e.g. for tuning the frequency of a transmon, where the control hamiltonian is not easily accessible. 
If max.excitation is non-zero the resulting Hamiltonian is cut accordingly""" @@ -349,10 +349,10 @@ def get_Hamiltonian(self, signal=None): signal_hamiltonian = self.drift_ham else: if self.dressed: - hamiltonians = copy.deepcopy(self.__dressed_hamiltonians) + hamiltonians = copy.deepcopy(self._dressed_hamiltonians) transform = self.transform else: - hamiltonians = copy.deepcopy(self.__hamiltonians) + hamiltonians = copy.deepcopy(self._hamiltonians) transform = None for key, sig in signal.items(): if key in self.subsystems: @@ -379,7 +379,7 @@ def get_Hamiltonian(self, signal=None): return signal_hamiltonian def get_Liouvillian(self, signal): - h = self.get_Hamiltonian() + h = self.get_system_Hamiltonian(signal) col_ops = self.get_Lindbladians() if self.max_excitations: col_ops = self.cut_excitations(col_ops) @@ -409,10 +409,10 @@ def get_dynamics_generators(self, signal): if self.lindbladian: dyn_gens = self.get_Liouvillian(signal) else: - dyn_gens = -1j * self.get_Hamiltonian(signal) + dyn_gens = -1j * self.get_system_Hamiltonian(signal) ts = [] - ts_list = [sig["ts"][1:] for sig in signal.values()] + ts_list = [sig["ts"][:] for sig in signal.values()] ts = tf.math.reduce_mean(ts_list, axis=0) #Only do the safety check outside of graph mode for performance reasons. #When using graph mode, the safety check will still be executed ONCE during tracing @@ -429,7 +429,7 @@ def get_dynamics_generators(self, signal): return dyn_gens * dt def get_sparse_Hamiltonian(self, signal=None): - return self.blowup_excitations(self.get_Hamiltonian(signal)) + return self.blowup_excitations(self.get_system_Hamiltonian(signal)) def get_Lindbladians(self): if self.dressed: @@ -458,7 +458,7 @@ def update_Hamiltonians(self): self.drift_ham = sum(hamiltonians.values()) self.control_hams = control_hams - self.__hamiltonians = hamiltonians + self._hamiltonians = hamiltonians def update_Lindbladians(self): """Return Lindbladian operators and their prefactors.""" @@ -516,7 +516,7 @@ def update_dressed(self, ordered=True): dressed_control_hams = {} dressed_col_ops = [] dressed_hamiltonians = dict() - for k, h in self.__hamiltonians.items(): + for k, h in self._hamiltonians.items(): dressed_hamiltonians[k] = tf.matmul( tf.matmul(tf.linalg.adjoint(self.transform), h), self.transform ) @@ -530,7 +530,7 @@ def update_dressed(self, ordered=True): ) self.dressed_drift_ham = dressed_drift_ham self.dressed_control_hams = dressed_control_hams - self.__dressed_hamiltonians = dressed_hamiltonians + self._dressed_hamiltonians = dressed_hamiltonians if self.lindbladian: for col_op in self.col_ops: dressed_col_ops.append( diff --git a/examples/single_qubit_blackbox_exp.py b/examples/single_qubit_blackbox_exp.py index 145f951f..2519319c 100644 --- a/examples/single_qubit_blackbox_exp.py +++ b/examples/single_qubit_blackbox_exp.py @@ -15,7 +15,7 @@ def create_experiment(): - lindblad = False + lindblad = True dressed = True qubit_lvls = 3 freq = 5e9 @@ -27,6 +27,8 @@ def create_experiment(): awg_res = 2e9 sideband = 50e6 lo_freq = 5e9 + sideband + qubit_t1 = 0.20e-6 + qubit_t2star = 0.39e-6 # ### MAKE MODEL q1 = chip.Qubit( @@ -46,6 +48,11 @@ def create_experiment(): ), hilbert_dim=qubit_lvls, temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit="K"), + t1 = Qty(value = qubit_t1, min_val=0.001e-6, max_val=90e-6, unit='s'), + t2star = Qty( + value = qubit_t2star, + min_val=0.0010e-6, max_val=90e-6, + unit='s'), ) drive = chip.Drive( From 4f50fef27e24fec357f59454677a8ddaa351c324 Mon Sep 17 00:00:00 2001 From: Moritz Schu Date: 
Fri, 15 Jul 2022 16:28:04 +0200 Subject: [PATCH 05/14] deleted some print statements and removed a useless function --- c3/libraries/propagation.py | 73 ------------------------------------- 1 file changed, 73 deletions(-) diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index 9c6a06cd..4ed533b6 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -387,79 +387,6 @@ def pwc_sequential_parallel( return {"U": U, "dUs": Us} -@unitary_deco -@tf.function -def pwc_sequential_parallel2( - model: Model, - gen: Generator, - instr: Instruction, - parallel: tf.int32 = tf.constant(16,tf.int32), - ) -> Dict: - """ - Solve the equation of motion (Lindblad or Schrรถdinger) for a given control - signal and Hamiltonians. - - Parameters - ---------- - signal: dict - Waveform of the control signal per drive line. - gate: str - Identifier for one of the gates. - parallel: tf.int32: - number of prarallelly executing matrix multiplications - - Returns - ------- - unitary - Matrix representation of the gate. - """ - signal = gen.generate_signals(instr) - #get number of time steps in the signal - n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32) - - batch_size = tf.cast(tf.math.ceil(tf.math.divide(n,parallel)),tf.int32) - tf.print("batch_size", batch_size ) - tf.print("n", n ) - - ns = tf.TensorArray(tf.int32,parallel + 1,infer_shape = True) - for i in range(parallel + 1): - ns = ns.write(i,tf.minimum(i*batch_size,n)) - Us = tf.TensorArray(tf.complex128,parallel,infer_shape = True) - if model.lindbladian: - for i in range(parallel): - Us = Us.write(i,Id_like(model.get_Liouvillian(None))) - else: - for i in range(parallel): - Us = Us.write(i,Id_like(model.get_system_Hamiltonian(None))) - - #tf function should parallelize this first loop - with tf.device("/cpu:0"): - for j in range(parallel): - tf.print(j) - for i in range(ns.read(j),ns.read(j+1)): - mini_signal = {} - for key in signal.keys(): - mini_signal[key] = {} - mini_signal[key]['values'] = tf.expand_dims(signal[key]['values'][i],0) - #the ts are only used to compute dt and therefore this works - mini_signal[key]['ts'] = signal[key]['ts'][0:2] - dynamics_generators = model.get_dynamics_generators(mini_signal) - - dU = tf.linalg.expm(dynamics_generators) - #accumulate propagator - Us = Us.write(j,Us.read(j) @ dU[0]) - - U = Us.read(0) - for j in range(1,parallel): - U = U @ Us.read(j) - - if model.max_excitations: - U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128))) - dUs = tf.vectorized_map(model.blowup_excitations, dUs) - - return {"U": U} - - #################### # HELPER FUNCTIONS # #################### From c6e109eb807b5690691064a1333299ccc6405318 Mon Sep 17 00:00:00 2001 From: Moritz Schu Date: Mon, 18 Jul 2022 15:51:31 +0200 Subject: [PATCH 06/14] fixed liouvillian propagation in pwc_parallel_sequential --- c3/experiment.py | 4 ++ c3/libraries/propagation.py | 85 ++++++++++++++++++++++++------------- c3/model.py | 13 +++--- 3 files changed, 68 insertions(+), 34 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index b57d95d4..df421336 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -502,6 +502,10 @@ def compute_propagators(self): result = self.propagation( model, generator, instr, self.folding_stack[steps] ) + elif self.propagation is unitary_provider["pwc_sequential_parallel"] and \ + hasattr(self,"parallel"): + result = self.propagation(model, generator, instr, self.parallel) + else: result = self.propagation( model, generator, 
instr, diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index 4ed533b6..439b4ef4 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -284,7 +284,7 @@ def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict: if model.lindbladian: U = Id_like(model.get_Liouvillian(None)) else: - U = Id_like(model.get_system_Hamiltonian()) + U = Id_like(model.get_Hamiltonian()) for i in range(n): mini_signal = {} @@ -312,11 +312,13 @@ def pwc_sequential_parallel( model: Model, gen: Generator, instr: Instruction, - parallel: tf.int32 = tf.constant(16,tf.int32), + parallel: int = 16, ) -> Dict: """ Solve the equation of motion (Lindblad or Schrรถdinger) for a given control signal and Hamiltonians. + This function will be retraced if different values of parallel are input + since parallel is input as a non tensorflow datatype Parameters ---------- @@ -324,7 +326,7 @@ def pwc_sequential_parallel( Waveform of the control signal per drive line. gate: str Identifier for one of the gates. - parallel: tf.int32: + parallel: int number of prarallelly executing matrix multiplications Returns @@ -332,57 +334,82 @@ def pwc_sequential_parallel( unitary Matrix representation of the gate. """ + #In this function, there is a deliberate and clear distinction between tensorflow + #and non tensorflow datatypes to guide which parts are hardwired during tracing + #and which are not. This was necessary to allow for the tf.function decorator. signal = gen.generate_signals(instr) #get number of time steps in the signal. Since this should always be the same, - #it does not interfere with tf.function tracing - n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32) + #it does not interfere with tf.function tracing despite its numpy data type + n_np = len(list(list(signal.values())[0].values())[0]) #non tensorflow datatype + n = tf.constant(n_np,dtype=tf.int32) #tensorflow datatype - batch_size = tf.cast(tf.math.ceil(tf.math.divide(n,parallel)),tf.int32) + #batch_size is the number of operations happening sequentially, parallel is the number + #of operations happening in parallel. + #Their product is n or slightly bigger than n. n is the total number of operations. + batch_size_np = np.ceil(n_np/parallel) #non tensorflow datatype + batch_size = tf.constant(batch_size_np,dtype=tf.int32) #tensorflow datatype #i tried to set the Us to None at the beginning and have an if else condition #that handles the first call, but tensorflow complained - if model.lindbladian: - Us = tf.eye(model.get_Liouvillian().shape[1],batch_shape=[parallel],dtype=tf.complex128) - else: - Us = tf.eye(model.get_system_Hamiltonian().shape[1],batch_shape=[parallel],dtype=tf.complex128) - #batch_size is the number of operations happening sequentially, parallel is the number - #of operations happening in parallel. their product is the total number of operations. - for i in range(batch_size): + #edge case at the end must be handled outside the loop so that tensorflow can propperly + #trace the loop. 
I use this to simultaneously initialize the Us
+    mismatch = int(n_np - np.floor(n_np/parallel) * parallel) #a modulo might also work
+    mini_init_signal = {}
+    for key in signal.keys():
+        mini_init_signal[key] = {}
+        #the signals pulled here are not in sequence, but that should not matter
+        #the multiplication of the relevant propagators is still ordered correctly
+        mini_init_signal[key]['values'] = signal[key]['values'][batch_size - 1::batch_size]
+        #this does nothing but reassure tensorflow of the shape of the tensor
+        mini_init_signal[key]['values'] = tf.reshape(mini_init_signal[key]['values'],[mismatch])
+        #the ts are only used to compute dt and therefore this works
+        mini_init_signal[key]['ts'] = signal[key]['ts'][0:2]
+    dynamics_generators = model.get_dynamics_generators(mini_init_signal)
+    Us = tf.linalg.expm(dynamics_generators)
+    #possibly correct shape
+    if Us.shape[1] is not parallel:
+        Us = tf.concat([
+            Us,
+            tf.eye(
+                Us.shape[1],
+                batch_shape=[parallel - Us.shape[0]],
+                dtype=tf.complex128),],
+            axis=0,
+            )
+
+    #since we had to start from the back to handle the final batch with possibly different length
+    #we need to continue backwards and reverse the order (see batch_size - i - 2)
+    #this is necessary because "reversed" can't be traced
+    for i in range(batch_size - 1):
+        mini_signal = {}
+        for key in signal.keys():
+            mini_signal[key] = {}
+            #the signals pulled here are not in sequence, but that should not matter
+            #the multiplication of the relevant propagators is still ordered correctly
+            mini_signal[key]['values'] = signal[key]['values'][(batch_size - i - 2)::batch_size]
+            #this does nothing but reassure tensorflow of the shape of the tensor
+            mini_signal[key]['values'] = tf.reshape(mini_signal[key]['values'],[parallel])
+            #the ts are only used to compute dt and therefore this works
+            mini_signal[key]['ts'] = signal[key]['ts'][0:2]
+        dynamics_generators = model.get_dynamics_generators(mini_signal)
+
+        dUs = tf.linalg.expm(dynamics_generators)
+        #I made sure that this order produces the same result as the original pwc function
+        #though it is reversed just like the for loop is reversed
+        Us = Us @ dUs
+
+    #The Us are partially accumulated propagators, multiplying them together
+    #yields the final propagator. They serve a similar function as the dUs
+    #but there are typically fewer of them
+    #here the multiplication order is flipped compared to the Us above. 
+ #while each individual propagator in Us was accumulatd in reverse, + #the thereby resulting Us are still orderd advancing in time U = tf_matmul_left(tf.cast(Us, tf.complex128)) if model.max_excitations: - U = model.blowup_excitations(tf_matmul_left(tf.cast(dU, tf.complex128))) - Us = tf.vectorized_map(model.blowup_excitations, dU) + U = model.blowup_excitations(tf_matmul_left(tf.cast(Us, tf.complex128))) + Us = tf.vectorized_map(model.blowup_excitations, Us) return {"U": U, "dUs": Us} diff --git a/c3/model.py b/c3/model.py index 1e307e4e..35a9f254 100755 --- a/c3/model.py +++ b/c3/model.py @@ -338,7 +338,7 @@ def get_sparse_Hamiltonians(self): sparse_controls = tf.vectorized_map(self.blowup_excitations, controls) return sparse_drift, sparse_controls - def get_system_Hamiltonian(self, signal=None): + def get_Hamiltonian(self, signal=None): """Get a hamiltonian with an optional signal. This will return an hamiltonian over time. Can be used e.g. for tuning the frequency of a transmon, where the control hamiltonian is not easily accessible. If max.excitation is non-zero the resulting Hamiltonian is cut accordingly""" @@ -378,12 +378,14 @@ def get_system_Hamiltonian(self, signal=None): return signal_hamiltonian - def get_Liouvillian(self, signal): - h = self.get_system_Hamiltonian(signal) + def get_Liouvillian(self, signal=None): + h = self.get_Hamiltonian(signal) col_ops = self.get_Lindbladians() if self.max_excitations: col_ops = self.cut_excitations(col_ops) + #h = tf.expand_dims(h,0) + #print("h",h) coher_superop = -1j * (tf_utils.tf_spre(h) - tf_utils.tf_spost(h)) for col_op in col_ops: super_clp = tf.matmul( @@ -399,6 +401,7 @@ def get_Liouvillian(self, signal): tf_utils.tf_spost(tf.linalg.adjoint(col_op)), ) liouvillian = coher_superop + super_clp - anticomm_L_clp - anticomm_R_clp + #print("liouvillian",liouvillian) return liouvillian def get_dynamics_generators(self, signal): @@ -409,7 +412,7 @@ def get_dynamics_generators(self, signal): if self.lindbladian: dyn_gens = self.get_Liouvillian(signal) else: - dyn_gens = -1j * self.get_system_Hamiltonian(signal) + dyn_gens = -1j * self.get_Hamiltonian(signal) ts = [] ts_list = [sig["ts"][:] for sig in signal.values()] @@ -429,7 +432,7 @@ def get_dynamics_generators(self, signal): return dyn_gens * dt def get_sparse_Hamiltonian(self, signal=None): - return self.blowup_excitations(self.get_system_Hamiltonian(signal)) + return self.blowup_excitations(self.get_Hamiltonian(signal)) def get_Lindbladians(self): if self.dressed: From a3dd14ffb792f4f5a8778c0224e3eb5f55bea32a Mon Sep 17 00:00:00 2001 From: Moritz Schu Date: Mon, 18 Jul 2022 16:55:15 +0200 Subject: [PATCH 07/14] added typing --- c3/experiment.py | 21 ++-- c3/libraries/chip.py | 2 +- c3/libraries/propagation.py | 143 ++++++++++++++------------ c3/model.py | 10 +- c3/optimizers/modellearning.py | 6 +- examples/single_qubit_blackbox_exp.py | 7 +- 6 files changed, 101 insertions(+), 88 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index df421336..6c07d25e 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -502,20 +502,23 @@ def compute_propagators(self): result = self.propagation( model, generator, instr, self.folding_stack[steps] ) - elif self.propagation is unitary_provider["pwc_sequential_parallel"] and \ - hasattr(self,"parallel"): - result = self.propagation(model, generator, instr, self.parallel) + elif self.propagation is unitary_provider[ + "pwc_sequential_parallel" + ] and hasattr(self, "parallel"): + result = self.propagation(model, generator, instr, 
self.parallel) else: result = self.propagation( - model, generator, instr, + model, + generator, + instr, ) U = result["U"] signal = generator.generate_signals(instr) ts_list = [sig["ts"][1:] for sig in signal.values()] ts = tf.math.reduce_mean(ts_list, axis=0) - #Only do the safety check outside of graph mode for performance reasons. - #When using graph mode, the safety check will still be executed ONCE during tracing + # Only do the safety check outside of graph mode for performance reasons. + # When using graph mode, the safety check will still be executed ONCE during tracing if tf.executing_eagerly() and not tf.reduce_all( tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0]) ): @@ -567,8 +570,10 @@ def compute_propagators(self): dephasing_channel = model.get_dephasing_channel(t_final, amps) U = tf.matmul(dephasing_channel, U) propagators[gate] = U - if self.propagation is unitary_provider["pwc"] or \ - self.propagation is unitary_provider["pwc_sequential_parallel"]: + if ( + self.propagation is unitary_provider["pwc"] + or self.propagation is unitary_provider["pwc_sequential_parallel"] + ): partial_propagators[gate] = result["dUs"] # TODO we might want to move storing of the propagators to the instruction object diff --git a/c3/libraries/chip.py b/c3/libraries/chip.py index 94a48e57..392d3d7d 100755 --- a/c3/libraries/chip.py +++ b/c3/libraries/chip.py @@ -1234,6 +1234,6 @@ def get_Hamiltonian( return h elif isinstance(signal, dict): sig = tf.cast(signal["values"], tf.complex128) - #sig = tf.reshape(sig, [sig.shape[0], 1, 1]) + # sig = tf.reshape(sig, [sig.shape[0], 1, 1]) sig = tf.reshape(sig, [tf.shape(sig)[0], 1, 1]) return tf.expand_dims(h, 0) * sig diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index 439b4ef4..288e0d9a 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -259,7 +259,6 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - @unitary_deco -@tf.function def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict: """ Solve the equation of motion (Lindblad or Schrรถdinger) for a given control @@ -278,8 +277,8 @@ def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict: Matrix representation of the gate. 
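    Examples
    --------
    A hypothetical invocation (sketch only; assumes an already configured
    Model, Generator and Instruction)::

        result = pwc_sequential(model, gen, instr)
        U = result["U"]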
""" signal = gen.generate_signals(instr) - #get number of time steps in the signal - n = tf.constant(len(list(list(signal.values())[0].values())[0]),dtype=tf.int32) + # get number of time steps in the signal + n = tf.constant(len(list(list(signal.values())[0].values())[0]), dtype=tf.int32) if model.lindbladian: U = Id_like(model.get_Liouvillian(None)) @@ -287,33 +286,31 @@ def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict: U = Id_like(model.get_Hamiltonian()) for i in range(n): - mini_signal = {} + mini_signal: Dict[str, Dict] = {} for key in signal.keys(): mini_signal[key] = {} - mini_signal[key]['values'] = tf.expand_dims(signal[key]['values'][i],0) - #the ts are only used to compute dt and therefore this works - mini_signal[key]['ts'] = signal[key]['ts'][0:2] + mini_signal[key]["values"] = tf.expand_dims(signal[key]["values"][i], 0) + # the ts are only used to compute dt and therefore this works + mini_signal[key]["ts"] = signal[key]["ts"][0:2] dynamics_generators = model.get_dynamics_generators(mini_signal) dU = tf.linalg.expm(dynamics_generators) - #i made shure that this order produces the same result as the original pwc function + # i made shure that this order produces the same result as the original pwc function U = dU[0] @ U if model.max_excitations: - U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128))) - dUs = tf.vectorized_map(model.blowup_excitations, dUs) + U = model.blowup_excitations(U) return {"U": U} @unitary_deco -@tf.function def pwc_sequential_parallel( - model: Model, - gen: Generator, - instr: Instruction, - parallel: int = 16, - ) -> Dict: + model: Model, + gen: Generator, + instr: Instruction, + parallel: int = 16, +) -> Dict: """ Solve the equation of motion (Lindblad or Schrรถdinger) for a given control signal and Hamiltonians. @@ -334,77 +331,89 @@ def pwc_sequential_parallel( unitary Matrix representation of the gate. """ - #In this function, there is a deliberate and clear distinction between tensorflow - #and non tensorflow datatypes to guide which parts are hardwired during tracing - #and which are not. This was necessary to allow for the tf.function decorator. + # In this function, there is a deliberate and clear distinction between tensorflow + # and non tensorflow datatypes to guide which parts are hardwired during tracing + # and which are not. This was necessary to allow for the tf.function decorator. signal = gen.generate_signals(instr) - #get number of time steps in the signal. Since this should always be the same, - #it does not interfere with tf.function tracing despite its numpy data type - n_np = len(list(list(signal.values())[0].values())[0]) #non tensorflow datatype - n = tf.constant(n_np,dtype=tf.int32) #tensorflow datatype - - #batch_size is the number of operations happening sequentially, parallel is the number - #of operations happening in parallel. - #Their product is n or slightly bigger than n. n is the total number of operations. - batch_size_np = np.ceil(n_np/parallel) #non tensorflow datatype - batch_size = tf.constant(batch_size_np,dtype=tf.int32) #tensorflow datatype - - #i tried to set the Us to None at the beginning and have an if else condition - #that handles the first call, but tensorflow complained - - #edge case at the end must be handled outside the loop so that tensorflow can propperly - #trace the loop. 
I use this to simultaniously initialize the Us - mismatch = int(n_np - np.floor(n_np/parallel) * parallel) #a modulo might also work - mini_init_signal = {} + # get number of time steps in the signal. Since this should always be the same, + # it does not interfere with tf.function tracing despite its numpy data type + n_np = len(list(list(signal.values())[0].values())[0]) # non tensorflow datatype + + # batch_size is the number of operations happening sequentially, parallel is the number + # of operations happening in parallel. + # Their product is n or slightly bigger than n. n is the total number of operations. + batch_size_np = np.ceil(n_np / parallel) # non tensorflow datatype + batch_size = tf.constant(batch_size_np, dtype=tf.int32) # tensorflow datatype + + # i tried to set the Us to None at the beginning and have an if else condition + # that handles the first call, but tensorflow complained + + # edge case at the end must be handled outside the loop so that tensorflow can propperly + # trace the loop. I use this to simultaniously initialize the Us + mismatch = int( + n_np - np.floor(n_np / parallel) * parallel + ) # a modulo might also work + mini_init_signal: Dict[str, Dict] = {} for key in signal.keys(): mini_init_signal[key] = {} - #the signals pulled here are not in sequence, but that should not matter - #the multiplication of the relevant propagators is still ordered correctly - mini_init_signal[key]['values'] = signal[key]['values'][batch_size - 1::batch_size] - #this does nothing but reashure tensorflow of the shape of the tensor - mini_init_signal[key]['values'] = tf.reshape(mini_init_signal[key]['values'],[mismatch]) - #the ts are only used to compute dt and therefore this works - mini_init_signal[key]['ts'] = signal[key]['ts'][0:2] + # the signals pulled here are not in sequence, but that should not matter + # the multiplication of the relevant propagators is still ordered correctly + mini_init_signal[key]["values"] = signal[key]["values"][ + batch_size - 1 :: batch_size + ] + # this does nothing but reashure tensorflow of the shape of the tensor + mini_init_signal[key]["values"] = tf.reshape( + mini_init_signal[key]["values"], [mismatch] + ) + # the ts are only used to compute dt and therefore this works + mini_init_signal[key]["ts"] = signal[key]["ts"][0:2] dynamics_generators = model.get_dynamics_generators(mini_init_signal) Us = tf.linalg.expm(dynamics_generators) - #possibly correct shape + # possibly correct shape if Us.shape[1] is not parallel: - Us = tf.concat([ + Us = tf.concat( + [ Us, tf.eye( Us.shape[1], batch_shape=[parallel - Us.shape[0]], - dtype=tf.complex128),], + dtype=tf.complex128, + ), + ], axis=0, - ) + ) - #since we had to start from the back to handle the final batch with possibly different length - #we need to continue backwards and reverse the order (see batch_size - i - 2) - #this is necessary because "reversed" cant be traced + # since we had to start from the back to handle the final batch with possibly different length + # we need to continue backwards and reverse the order (see batch_size - i - 2) + # this is necessary because "reversed" cant be traced for i in range(batch_size - 1): - mini_signal = {} + mini_signal: Dict[str, Dict] = {} for key in signal.keys(): mini_signal[key] = {} - #the signals pulled here are not in sequence, but that should not matter - #the multiplication of the relevant propagators is still ordered correctly - mini_signal[key]['values'] = signal[key]['values'][(batch_size - i - 2)::batch_size] - #this does nothing 
but reashure tensorflow of the shape of the tensor - mini_signal[key]['values'] = tf.reshape(mini_signal[key]['values'],[parallel]) - #the ts are only used to compute dt and therefore this works - mini_signal[key]['ts'] = signal[key]['ts'][0:2] + # the signals pulled here are not in sequence, but that should not matter + # the multiplication of the relevant propagators is still ordered correctly + mini_signal[key]["values"] = signal[key]["values"][ + (batch_size - i - 2) :: batch_size + ] + # this does nothing but reashure tensorflow of the shape of the tensor + mini_signal[key]["values"] = tf.reshape( + mini_signal[key]["values"], [parallel] + ) + # the ts are only used to compute dt and therefore this works + mini_signal[key]["ts"] = signal[key]["ts"][0:2] dynamics_generators = model.get_dynamics_generators(mini_signal) dUs = tf.linalg.expm(dynamics_generators) - #i made shure that this order produces the same result as the original pwc function - #though it is reversed just like the for loop is reversed + # i made shure that this order produces the same result as the original pwc function + # though it is reversed just like the for loop is reversed Us = Us @ dUs - #The Us are partially accumulated propagators, multiplying them together - #yields the final propagator. They serve a similar function as the dUs - #but there are typically fewer of them - #here the multiplication order is flipped compared to the Us above. - #while each individual propagator in Us was accumulatd in reverse, - #the thereby resulting Us are still orderd advancing in time + # The Us are partially accumulated propagators, multiplying them together + # yields the final propagator. They serve a similar function as the dUs + # but there are typically fewer of them + # here the multiplication order is flipped compared to the Us above. + # while each individual propagator in Us was accumulatd in reverse, + # the thereby resulting Us are still orderd advancing in time U = tf_matmul_left(tf.cast(Us, tf.complex128)) if model.max_excitations: diff --git a/c3/model.py b/c3/model.py index 35a9f254..0c6475e2 100755 --- a/c3/model.py +++ b/c3/model.py @@ -384,8 +384,8 @@ def get_Liouvillian(self, signal=None): if self.max_excitations: col_ops = self.cut_excitations(col_ops) - #h = tf.expand_dims(h,0) - #print("h",h) + # h = tf.expand_dims(h,0) + # print("h",h) coher_superop = -1j * (tf_utils.tf_spre(h) - tf_utils.tf_spost(h)) for col_op in col_ops: super_clp = tf.matmul( @@ -401,7 +401,7 @@ def get_Liouvillian(self, signal=None): tf_utils.tf_spost(tf.linalg.adjoint(col_op)), ) liouvillian = coher_superop + super_clp - anticomm_L_clp - anticomm_R_clp - #print("liouvillian",liouvillian) + # print("liouvillian",liouvillian) return liouvillian def get_dynamics_generators(self, signal): @@ -417,8 +417,8 @@ def get_dynamics_generators(self, signal): ts = [] ts_list = [sig["ts"][:] for sig in signal.values()] ts = tf.math.reduce_mean(ts_list, axis=0) - #Only do the safety check outside of graph mode for performance reasons. - #When using graph mode, the safety check will still be executed ONCE during tracing + # Only do the safety check outside of graph mode for performance reasons. 
+ # When using graph mode, the safety check will still be executed ONCE during tracing if tf.executing_eagerly() and not tf.reduce_all( tf.math.reduce_variance(ts_list, axis=0) < (1e-5 * (ts[1] - ts[0])) ): diff --git a/c3/optimizers/modellearning.py b/c3/optimizers/modellearning.py index 809657e2..e2bdc98a 100755 --- a/c3/optimizers/modellearning.py +++ b/c3/optimizers/modellearning.py @@ -19,6 +19,8 @@ neg_loglkh_multinom_norm, ) +from typing import Any, Optional + class ModelLearning(Optimizer): """ @@ -86,7 +88,7 @@ def __init__( super().__init__(pmap=pmap, algorithm=algorithm, logger=logger) - self.state_labels = {"all": None} + self.state_labels: Dict[str, Optional[list]] = {"all": None} for target, labels in state_labels.items(): self.state_labels[target] = [tuple(lab) for lab in labels] @@ -106,7 +108,7 @@ def __init__( self.inverse = False self.options = options - self.learn_data = {} + self.learn_data: Dict[Any, Any] = {} self.read_data(datafiles) self.sampling = sampling self.batch_sizes = batch_sizes diff --git a/examples/single_qubit_blackbox_exp.py b/examples/single_qubit_blackbox_exp.py index 2519319c..8f814474 100644 --- a/examples/single_qubit_blackbox_exp.py +++ b/examples/single_qubit_blackbox_exp.py @@ -48,11 +48,8 @@ def create_experiment(): ), hilbert_dim=qubit_lvls, temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit="K"), - t1 = Qty(value = qubit_t1, min_val=0.001e-6, max_val=90e-6, unit='s'), - t2star = Qty( - value = qubit_t2star, - min_val=0.0010e-6, max_val=90e-6, - unit='s'), + t1=Qty(value=qubit_t1, min_val=0.001e-6, max_val=90e-6, unit="s"), + t2star=Qty(value=qubit_t2star, min_val=0.0010e-6, max_val=90e-6, unit="s"), ) drive = chip.Drive( From 24020f0b7129f440a7f71bdbefa5459f7532fc01 Mon Sep 17 00:00:00 2001 From: frosati1 Date: Fri, 29 Jul 2022 20:19:46 +0200 Subject: [PATCH 08/14] test for dynamics generator and enforced passing signal --- c3/libraries/propagation.py | 5 ++--- c3/model.py | 15 ++++++++++----- test/test_model.py | 15 ++++++++++++++- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py index 288e0d9a..2bb80d94 100644 --- a/c3/libraries/propagation.py +++ b/c3/libraries/propagation.py @@ -242,7 +242,6 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) - Matrix representation of the gate. """ signal = gen.generate_signals(instr) - # Why do I get 0.0 if I rint gen.resolution here?! FR dynamics_generators = model.get_dynamics_generators(signal) @@ -281,9 +280,9 @@ def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict: n = tf.constant(len(list(list(signal.values())[0].values())[0]), dtype=tf.int32) if model.lindbladian: - U = Id_like(model.get_Liouvillian(None)) + U = Id_like(model.get_Liouvillian(signal)) else: - U = Id_like(model.get_Hamiltonian()) + U = Id_like(model.get_Hamiltonian(signal)) for i in range(n): mini_signal: Dict[str, Dict] = {} diff --git a/c3/model.py b/c3/model.py index 0c6475e2..6e9f0ef5 100755 --- a/c3/model.py +++ b/c3/model.py @@ -338,11 +338,16 @@ def get_sparse_Hamiltonians(self): sparse_controls = tf.vectorized_map(self.blowup_excitations, controls) return sparse_drift, sparse_controls - def get_Hamiltonian(self, signal=None): + def get_Hamiltonian(self, signal: Dict): """Get a hamiltonian with an optional signal. This will return an hamiltonian over time. Can be used e.g. for tuning the frequency of a transmon, where the control hamiltonian is not easily accessible. 
If max.excitation is non-zero the resulting Hamiltonian is cut accordingly""" - if signal is None: + sgnl = None + for sgn in signal.values(): + if "values" in sgn: + sgnl = sgn["values"] + + if sgnl is None: if self.dressed: signal_hamiltonian = self.dressed_drift_ham else: @@ -378,7 +383,7 @@ def get_Hamiltonian(self, signal=None): return signal_hamiltonian - def get_Liouvillian(self, signal=None): + def get_Liouvillian(self, signal): h = self.get_Hamiltonian(signal) col_ops = self.get_Lindbladians() if self.max_excitations: @@ -404,7 +409,7 @@ def get_Liouvillian(self, signal=None): # print("liouvillian",liouvillian) return liouvillian - def get_dynamics_generators(self, signal): + def get_dynamics_generators(self, signal: Dict): """Returns Tensor of Hamiltonians if model.lindbladian is False, otherwise it returns as a Tensor of superoperators the Liouvillians solving the Lindblad Master Equation @@ -431,7 +436,7 @@ def get_dynamics_generators(self, signal): return dyn_gens * dt - def get_sparse_Hamiltonian(self, signal=None): + def get_sparse_Hamiltonian(self, signal): return self.blowup_excitations(self.get_Hamiltonian(signal)) def get_Lindbladians(self): diff --git a/test/test_model.py b/test/test_model.py index d611e4ca..96270dfa 100644 --- a/test/test_model.py +++ b/test/test_model.py @@ -128,7 +128,8 @@ def test_model_couplings() -> None: @pytest.mark.unit def test_model_get_hamiltonian() -> None: - ham = model.get_Hamiltonian() + sig = {"d1": {"ts": np.linspace(0, 5e-9, 10)}} + ham = model.get_Hamiltonian(sig) np.testing.assert_allclose(ham, hdrift) sig = {"d1": {"ts": np.linspace(0, 5e-9, 10), "values": np.linspace(0e9, 20e9, 10)}} @@ -136,6 +137,18 @@ def test_model_get_hamiltonian() -> None: np.testing.assert_allclose(hams, test_data["sliced_hamiltonians"]) +@pytest.mark.unit +def test_model_get_dynamics_generators() -> None: + sig = {"d1": {"ts": np.linspace(0, 5e-9, 10)}} + dt = 5e-9 / 9 + ham = model.get_dynamics_generators(sig) + np.testing.assert_allclose(ham, (-1j * dt) * hdrift) + + sig = {"d1": {"ts": np.linspace(0, 5e-9, 10), "values": np.linspace(0e9, 20e9, 10)}} + hams = model.get_dynamics_generators(sig) + np.testing.assert_allclose(hams, (-1j * dt) * test_data["sliced_hamiltonians"]) + + @pytest.mark.unit def test_get_qubit_frequency() -> None: np.testing.assert_allclose( From b86d5b31defcaade9a2a074992cfa511f43da454 Mon Sep 17 00:00:00 2001 From: frosati1 Date: Fri, 29 Jul 2022 21:50:09 +0200 Subject: [PATCH 09/14] created function for ts, dt and test --- c3/experiment.py | 15 ++------------- c3/model.py | 12 +++++++++++- test/test_model.py | 10 ++++++++++ 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/c3/experiment.py b/c3/experiment.py index 6c07d25e..91e86ad4 100755 --- a/c3/experiment.py +++ b/c3/experiment.py @@ -515,19 +515,8 @@ def compute_propagators(self): ) U = result["U"] signal = generator.generate_signals(instr) - ts_list = [sig["ts"][1:] for sig in signal.values()] - ts = tf.math.reduce_mean(ts_list, axis=0) - # Only do the safety check outside of graph mode for performance reasons. 
From b86d5b31defcaade9a2a074992cfa511f43da454 Mon Sep 17 00:00:00 2001
From: frosati1
Date: Fri, 29 Jul 2022 21:50:09 +0200
Subject: [PATCH 09/14] created function for ts, dt and test

---
 c3/experiment.py   | 15 ++-------------
 c3/model.py        | 12 +++++++++++-
 test/test_model.py | 10 ++++++++++
 3 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/c3/experiment.py b/c3/experiment.py
index 6c07d25e..91e86ad4 100755
--- a/c3/experiment.py
+++ b/c3/experiment.py
@@ -515,19 +515,8 @@ def compute_propagators(self):
         )
         U = result["U"]
         signal = generator.generate_signals(instr)
-        ts_list = [sig["ts"][1:] for sig in signal.values()]
-        ts = tf.math.reduce_mean(ts_list, axis=0)
-        # Only do the safety check outside of graph mode for performance reasons.
-        # When using graph mode, the safety check will still be executed ONCE during tracing
-        if tf.executing_eagerly() and not tf.reduce_all(
-            tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0])
-        ):
-            raise Exception("C3Error:Something with the times happend.")
-        if tf.executing_eagerly() and not tf.reduce_all(
-            tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0])  # type: ignore
-        ):
-            raise Exception("C3Error:Something with the times happend.")
-        self.ts = ts
+        times = model.get_ts_dt(signal)
+        self.ts = times["ts"]
         if model.use_FR:
             # TODO change LO freq to at the level of a line
             freqs = {}
diff --git a/c3/model.py b/c3/model.py
index 6e9f0ef5..94fbd0bd 100755
--- a/c3/model.py
+++ b/c3/model.py
@@ -419,6 +419,16 @@ def get_dynamics_generators(self, signal: Dict):
         else:
             dyn_gens = -1j * self.get_Hamiltonian(signal)
 
+        times = self.get_ts_dt(signal)
+
+        dt = times["dt"]
+
+        return dyn_gens * dt
+
+    def get_ts_dt(self, signal):
+        """
+        Given a signal, return a dict with the time slices ts and the time increment dt.
+        """
         ts = []
         ts_list = [sig["ts"][:] for sig in signal.values()]
         ts = tf.math.reduce_mean(ts_list, axis=0)
@@ -434,7 +444,7 @@ def get_dynamics_generators(self, signal: Dict):
             raise Exception("C3Error:Something with the times happend.")
 
         dt = tf.cast(ts[1] - ts[0], dtype=tf.complex128)
-        return dyn_gens * dt
+        return {"ts": ts, "dt": dt}
diff --git a/test/test_model.py b/test/test_model.py
index 96270dfa..c2114ecb 100644
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -149,6 +149,16 @@ def test_model_get_dynamics_generators() -> None:
     np.testing.assert_allclose(hams, (-1j * dt) * test_data["sliced_hamiltonians"])
 
 
+@pytest.mark.unit
+def test_model_get_ts_dt() -> None:
+    ts = np.linspace(0, 5e-9, 10)
+    sig = {"d1": {"ts": ts}}
+    dt = 5e-9 / 9
+    times = model.get_ts_dt(sig)
+    np.testing.assert_allclose(times["dt"], dt)
+    np.testing.assert_allclose(times["ts"], ts)
+
+
 @pytest.mark.unit
 def test_get_qubit_frequency() -> None:
     np.testing.assert_allclose(
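The contract that the new `get_ts_dt` enforces: every drive channel must come with essentially the same equidistant time grid, and the common step dt is what scales the dynamics generators. A rough standalone equivalent of that check — the uniformity test below uses absolute spacing differences rather than the variance form in the patch, so treat it as an illustration of the intent, not a drop-in replacement:

    import numpy as np

    def get_ts_dt_sketch(signal, rtol=1e-5):
        ts_list = np.stack([np.asarray(sig["ts"]) for sig in signal.values()])
        ts = ts_list.mean(axis=0)
        dt = ts[1] - ts[0]
        # All channels must share (almost) the same grid ...
        if not np.all(ts_list.var(axis=0) < rtol * dt):
            raise ValueError("Channel time grids disagree.")
        # ... and that grid must be (almost) equidistant.
        if not np.all(np.abs(np.diff(ts) - dt) < rtol * dt):
            raise ValueError("Time grid is not equidistant.")
        return {"ts": ts, "dt": dt}

    sig = {"d1": {"ts": np.linspace(0, 5e-9, 10)},
           "d2": {"ts": np.linspace(0, 5e-9, 10)}}
    assert np.isclose(get_ts_dt_sketch(sig)["dt"], 5e-9 / 9)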
From 82263a1497b08b38be4b7e0e7dfb0c4fca196ff1 Mon Sep 17 00:00:00 2001
From: frosati1
Date: Sun, 31 Jul 2022 22:43:05 +0200
Subject: [PATCH 10/14] test for lindblad

---
 test/test_lindblad.py | 220 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 220 insertions(+)
 create mode 100644 test/test_lindblad.py

diff --git a/test/test_lindblad.py b/test/test_lindblad.py
new file mode 100644
index 00000000..f7e8efd9
--- /dev/null
+++ b/test/test_lindblad.py
@@ -0,0 +1,220 @@
+"""
+testing module for lindblad propagation
+"""
+
+import pytest
+import copy
+import numpy as np
+from c3.model import Model as Mdl
+from c3.c3objs import Quantity as Qty
+from c3.parametermap import ParameterMap as PMap
+from c3.experiment import Experiment as Exp
+from c3.generator.generator import Generator as Gnr
+import c3.signal.gates as gates
+import c3.libraries.chip as chip
+import c3.generator.devices as devices
+import c3.libraries.hamiltonians as hamiltonians
+import c3.signal.pulse as pulse
+import c3.libraries.envelopes as envelopes
+import c3.libraries.tasks as tasks
+import c3.utils.tf_utils as tf_utils
+
+lindblad = True
+dressed = True
+qubit_lvls = 2
+freq = 5e9
+anhar = -210e6
+init_temp = 0
+qubit_temp = 1e-10
+t_final = 12e-9
+sim_res = 100e9
+awg_res = 2e9
+lo_freq = freq
+qubit_t1 = 0.20e-6
+qubit_t2star = 0.39e-6
+
+# ### MAKE MODEL
+q1 = chip.Qubit(
+    name="Q1",
+    desc="Qubit 1",
+    freq=Qty(
+        value=freq,
+        min_val=4.995e9,
+        max_val=5.005e9,
+        unit="Hz 2pi",
+    ),
+    anhar=Qty(
+        value=anhar,
+        min_val=-380e6,
+        max_val=-120e6,
+        unit="Hz 2pi",
+    ),
+    hilbert_dim=qubit_lvls,
+    temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit="K"),
+    t1=Qty(value=qubit_t1, min_val=0.001e-6, max_val=90e-6, unit="s"),
+    t2star=Qty(value=qubit_t2star, min_val=0.001e-6, max_val=90e-6, unit="s"),
+)
+
+drive = chip.Drive(
+    name="d1",
+    desc="Drive 1",
+    comment="Drive line 1 on qubit 1",
+    connected=["Q1"],
+    hamiltonian_func=hamiltonians.x_drive,
+)
+phys_components = [q1]
+line_components = [drive]
+
+init_ground = tasks.InitialiseGround(
+    init_temp=Qty(value=init_temp, min_val=-0.001, max_val=0.22, unit="K")
+)
+task_list = [init_ground]
+model = Mdl(phys_components, line_components, task_list)
+model.set_lindbladian(lindblad)
+model.set_dressed(dressed)
+
+# ### MAKE GENERATOR
+
+generator = Gnr(
+    devices={
+        "LO": devices.LO(name="lo", resolution=sim_res, outputs=1),
+        "AWG": devices.AWG(name="awg", resolution=awg_res, outputs=1),
+        "DigitalToAnalog": devices.DigitalToAnalog(
+            name="dac", resolution=sim_res, inputs=1, outputs=1
+        ),
+        "Mixer": devices.Mixer(name="mixer", inputs=2, outputs=1),
+        "VoltsToHertz": devices.VoltsToHertz(
+            name="v_to_hz",
+            V_to_Hz=Qty(value=1e9, min_val=0.9e9, max_val=1.1e9, unit="Hz/V"),
+            inputs=1,
+            outputs=1,
+        ),
+    },
+)
+
+generator.set_chains(
+    {
+        "d1": {
+            "LO": [],
+            "AWG": [],
+            "DigitalToAnalog": ["AWG"],
+            "Mixer": ["LO", "DigitalToAnalog"],
+            "VoltsToHertz": ["Mixer"],
+        }
+    }
+)
+
+
+gauss_params_single = {
+    "amp": Qty(value=0.440936, min_val=0.01, max_val=0.99, unit="V"),
+    "t_final": Qty(
+        value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit="s"
+    ),
+    "sigma": Qty(value=t_final / 4, min_val=t_final / 8, max_val=t_final / 2, unit="s"),
+    "xy_angle": Qty(
+        value=747.256e-6, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit="rad"
+    ),
+    "freq_offset": Qty(
+        value=145.022e3,
+        min_val=-10 * 1e6,
+        max_val=10 * 1e6,
+        unit="Hz 2pi",
+    ),
+}
+
+nodrive_env = pulse.EnvelopeDrag(
+    name="no_drive",
+    params={
+        "t_final": Qty(
+            value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit="s"
+        )
+    },
+    shape=envelopes.no_drive,
+)
+carrier_parameters = {
+    "freq": Qty(
+        value=lo_freq,
+        min_val=4.5e9,
+        max_val=6e9,
+        unit="Hz 2pi",
+    ),
+    "framechange": Qty(value=0.001493, min_val=-np.pi, max_val=3 * np.pi, unit="rad"),
+}
+carr = pulse.Carrier(
+    name="carrier",
+    desc="Frequency of the local oscillator",
+    params=carrier_parameters,
+)
+
+
+QId = gates.Instruction(
+    name="id", t_start=0.0, t_end=t_final, channels=["d1"], targets=[0]
+)
+
+QId.add_component(nodrive_env, "d1")
+QId.add_component(copy.deepcopy(carr), "d1")
+QId.comps["d1"]["carrier"].params["framechange"].set_value((t_final) % (2 * np.pi))
+
+
+QId1 = gates.Instruction(
+    name="id1", t_start=0.0, t_end=qubit_t1, channels=["d1"], targets=[0]
+)
+
+QId1.add_component(nodrive_env, "d1")
+QId1.add_component(copy.deepcopy(carr), "d1")
+QId1.comps["d1"]["carrier"].params["framechange"].set_value((qubit_t1) % (2 * np.pi))
+
+parameter_map = PMap(instructions=[QId, QId1], model=model, generator=generator)
+
+exp = Exp(pmap=parameter_map)
+
+init_dm = np.zeros([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
+init_dm[1][1] = 1
+init_vec = tf_utils.tf_dm_to_vec(init_dm)
+seq = ["id[0]"]
+exp.set_opt_gates(seq)
+
+
+@pytest.mark.unit
+def test_dissipation() -> None:
+    """Test dissipative nature of lindblad evolution"""
+
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(U, init_vec)
+    final_pops = exp.populations(final_vec, model.lindbladian)
+    assert final_pops[1] < 1
+
+
+@pytest.mark.unit
+def test_t1() -> None:
+    """Test that T1 decay corresponds to 1/e of the initial |1>-population"""
+    n = int(qubit_t1 / t_final)
+    n += 1
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(U**n, init_vec)
+    final_pops = exp.populations(final_vec, model.lindbladian)
+    assert final_pops[1] < (1 / np.exp(1))
+
+
+init_dm = np.zeros([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
+init_dm[0][1] = 1
+init_dm[1][0] = 1
+init_vec = tf_utils.tf_dm_to_vec(init_dm)
+
+
+@pytest.mark.unit
+def test_t2() -> None:
+    """Test that T2 decay corresponds to 1/e of the initial coherences"""
+    t2 = ((2 * qubit_t1) ** (-1) + (qubit_t2star) ** (-1)) ** (-1)
+    n = int(t2 / t_final)
+    n += 1
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(U**n, init_vec)
+    final_dm = tf_utils.tf_vec_to_dm(final_vec)
+    assert np.abs(final_dm[0][1]) < (1 / np.exp(1))

From d1aa054e8abecd4d5c9df444190e238f676223a8 Mon Sep 17 00:00:00 2001
From: frosati1
Date: Mon, 1 Aug 2022 10:10:08 +0200
Subject: [PATCH 11/14] updated fixed T1 in chip

---
 c3/libraries/chip.py  |  2 +-
 test/test_lindblad.py | 10 +---------
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/c3/libraries/chip.py b/c3/libraries/chip.py
index 392d3d7d..63d58bc0 100755
--- a/c3/libraries/chip.py
+++ b/c3/libraries/chip.py
@@ -216,7 +216,7 @@ def get_Lindbladian(self, dims):
         Ls = []
         if "t1" in self.params:
             t1 = self.params["t1"].get_value()
-            gamma = (0.5 / t1) ** 0.5
+            gamma = (1 / t1) ** 0.5
             L = gamma * self.collapse_ops["t1"]
             Ls.append(L)
         if "temp" in self.params:
diff --git a/test/test_lindblad.py b/test/test_lindblad.py
index f7e8efd9..951e0588 100644
--- a/test/test_lindblad.py
+++ b/test/test_lindblad.py
@@ -156,15 +156,7 @@
 QId.comps["d1"]["carrier"].params["framechange"].set_value((t_final) % (2 * np.pi))
 
 
-QId1 = gates.Instruction(
-    name="id1", t_start=0.0, t_end=qubit_t1, channels=["d1"], targets=[0]
-)
-
-QId1.add_component(nodrive_env, "d1")
-QId1.add_component(copy.deepcopy(carr), "d1")
-QId1.comps["d1"]["carrier"].params["framechange"].set_value((qubit_t1) % (2 * np.pi))
-
-parameter_map = PMap(instructions=[QId, QId1], model=model, generator=generator)
+parameter_map = PMap(instructions=[QId], model=model, generator=generator)
 
 exp = Exp(pmap=parameter_map)
 

From 7909e2c1770e92421ba37a66364c514976ccc577 Mon Sep 17 00:00:00 2001
From: frosati1
Date: Mon, 1 Aug 2022 10:30:09 +0200
Subject: [PATCH 12/14] Ashutosh saw an error in the test_noise

---
 test/test_noise.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/test_noise.py b/test/test_noise.py
index fcef3d3f..26f2e171 100644
--- a/test/test_noise.py
+++ b/test/test_noise.py
@@ -116,7 +116,7 @@ def test_noise_devices():
         assert np.std(pink_noiseA) >= 0.05 * params[0]
         assert np.std(pink_noiseA) < 10 * params[0] + 1e-15
         if params[0] > 1e-15:
-            assert np.median(np.abs(pink_noiseA - pink_noiseB) > 1e-10)
+            assert np.median(np.abs(pink_noiseA - pink_noiseB)) > 1e-10
 
         if params[1] > 1e-15:
             assert np.abs(np.mean(dc_noiseA - dc_noiseB)) > 1e-6
@@ -128,7 +128,7 @@ def test_noise_devices():
         assert np.std(awg_noiseA) >= 0.05 * params[2]
         assert np.std(awg_noiseA) < 10 * params[2] + 1e-15
         if params[2] > 1e-15:
-            assert np.mean(np.abs(awg_noiseA - awg_noiseB) > 1e-10)
+            assert np.mean(np.abs(awg_noiseA - awg_noiseB)) > 1e-10
 
     if np.max(params) > 0:
         assert fidelityA != fidelityB
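The test_noise change above is worth spelling out: with the parenthesis in the wrong place, the `> 1e-10` comparison ran elementwise first, so the assert judged the mean (or median) of a boolean mask rather than the size of the deviation. A small synthetic demonstration of how the two forms can disagree:

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.normal(size=1000)
    b = a.copy()
    b[0] += 1e-8                             # traces differ in one sample, barely

    # Old form: compare first, then average -> fraction of differing samples.
    buggy = np.mean(np.abs(a - b) > 1e-10)   # 0.001, truthy: assert would pass
    # Fixed form: average first, then compare magnitudes.
    fixed = np.mean(np.abs(a - b)) > 1e-10   # 1e-11 > 1e-10 is False

    assert bool(buggy) and not fixed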
From 1d9a79330040f83d6cdfe5dea8eaf997571b965d Mon Sep 17 00:00:00 2001
From: frosati1
Date: Wed, 3 Aug 2022 10:45:30 +0200
Subject: [PATCH 13/14] test_t2 corrected

---
 test/test_lindblad.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/test/test_lindblad.py b/test/test_lindblad.py
index 951e0588..60c73796 100644
--- a/test/test_lindblad.py
+++ b/test/test_lindblad.py
@@ -192,9 +192,7 @@ def test_t1() -> None:
     assert final_pops[1] < (1 / np.exp(1))
 
 
-init_dm = np.zeros([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
-init_dm[0][1] = 1
-init_dm[1][0] = 1
+init_dm = 0.5 * np.ones([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
 init_vec = tf_utils.tf_dm_to_vec(init_dm)
 
 
@@ -209,4 +207,4 @@ def test_t2() -> None:
     U = np.array(unitaries["id[0]"])
     final_vec = np.dot(U**n, init_vec)
     final_dm = tf_utils.tf_vec_to_dm(final_vec)
-    assert np.abs(final_dm[0][1]) < (1 / np.exp(1))
+    assert np.abs(final_dm[0][1]) < (1 / (2 * np.exp(1)))

From eaeb22abcbbd2d2edaf8326c4d53397f8dd2a7aa Mon Sep 17 00:00:00 2001
From: frosati1
Date: Fri, 19 Aug 2022 12:05:32 +0200
Subject: [PATCH 14/14] replaced ** with linalg.matrix_power

---
 test/test_lindblad.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/test_lindblad.py b/test/test_lindblad.py
index 60c73796..627c5b13 100644
--- a/test/test_lindblad.py
+++ b/test/test_lindblad.py
@@ -187,7 +187,7 @@ def test_t1() -> None:
     exp.set_prop_method("pwc")
     unitaries = exp.compute_propagators()
     U = np.array(unitaries["id[0]"])
-    final_vec = np.dot(U**n, init_vec)
+    final_vec = np.dot(np.linalg.matrix_power(U, n), init_vec)
     final_pops = exp.populations(final_vec, model.lindbladian)
     assert final_pops[1] < (1 / np.exp(1))
 
@@ -205,6 +205,6 @@ def test_t2() -> None:
     exp.set_prop_method("pwc")
     unitaries = exp.compute_propagators()
    U = np.array(unitaries["id[0]"])
-    final_vec = np.dot(U**n, init_vec)
+    final_vec = np.dot(np.linalg.matrix_power(U, n), init_vec)
     final_dm = tf_utils.tf_vec_to_dm(final_vec)
     assert np.abs(final_dm[0][1]) < (1 / (2 * np.exp(1)))
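One closing remark on patch 14: for a NumPy array, `U**n` is an elementwise power, not n-fold matrix multiplication, so before this fix the T1/T2 tests were not actually applying the identity-gate propagator n times. A self-contained check of the difference:

    import numpy as np

    theta = 0.3
    U = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta),  np.cos(theta)]])

    elementwise = U**3                        # every entry cubed
    repeated = np.linalg.matrix_power(U, 3)   # U @ U @ U

    assert not np.allclose(elementwise, repeated)
    # Repeated application of a rotation by theta is a rotation by 3*theta.
    assert np.allclose(repeated,
                       [[np.cos(3 * theta), -np.sin(3 * theta)],
                        [np.sin(3 * theta),  np.cos(3 * theta)]])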