diff --git a/c3/experiment.py b/c3/experiment.py
index 21e49c2e..91e86ad4 100755
--- a/c3/experiment.py
+++ b/c3/experiment.py
@@ -498,12 +498,25 @@ def compute_propagators(self):
             model.controllability = self.use_control_fields
             steps = int((instr.t_end - instr.t_start) * self.sim_res)
-            result = self.propagation(
-                model, generator, instr, self.folding_stack[steps]
-            )
+            if self.propagation is unitary_provider["pwc"]:
+                result = self.propagation(
+                    model, generator, instr, self.folding_stack[steps]
+                )
+            elif self.propagation is unitary_provider[
+                "pwc_sequential_parallel"
+            ] and hasattr(self, "parallel"):
+                result = self.propagation(model, generator, instr, self.parallel)
+            else:
+                result = self.propagation(
+                    model,
+                    generator,
+                    instr,
+                )
             U = result["U"]
-            dUs = result["dUs"]
-            self.ts = result["ts"]
+            signal = generator.generate_signals(instr)
+            times = model.get_ts_dt(signal)
+            self.ts = times["ts"]
             if model.use_FR:
                 # TODO change LO freq to at the level of a line
                 freqs = {}
@@ -546,7 +559,11 @@ def compute_propagators(self):
                 dephasing_channel = model.get_dephasing_channel(t_final, amps)
                 U = tf.matmul(dephasing_channel, U)
             propagators[gate] = U
-            partial_propagators[gate] = dUs
+            if (
+                self.propagation is unitary_provider["pwc"]
+                or self.propagation is unitary_provider["pwc_sequential_parallel"]
+            ):
+                partial_propagators[gate] = result["dUs"]
 
         # TODO we might want to move storing of the propagators to the instruction object
         if self.overwrite_propagators:
diff --git a/c3/libraries/chip.py b/c3/libraries/chip.py
index e55c3c60..63d58bc0 100755
--- a/c3/libraries/chip.py
+++ b/c3/libraries/chip.py
@@ -216,7 +216,7 @@ def get_Lindbladian(self, dims):
         Ls = []
         if "t1" in self.params:
             t1 = self.params["t1"].get_value()
-            gamma = (0.5 / t1) ** 0.5
+            gamma = (1 / t1) ** 0.5
             L = gamma * self.collapse_ops["t1"]
             Ls.append(L)
         if "temp" in self.params:
@@ -1234,5 +1234,6 @@ def get_Hamiltonian(
             return h
         elif isinstance(signal, dict):
             sig = tf.cast(signal["values"], tf.complex128)
-            sig = tf.reshape(sig, [sig.shape[0], 1, 1])
+            # sig = tf.reshape(sig, [sig.shape[0], 1, 1])
+            sig = tf.reshape(sig, [tf.shape(sig)[0], 1, 1])
             return tf.expand_dims(h, 0) * sig
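A note on the rate convention changed in get_Lindbladian above: with a collapse
operator L = sqrt(1/T1) * a, the Lindblad dissipator gives d<n>/dt = -<n>/T1, so the
excited-state population decays as exp(-t/T1); the previous sqrt(0.5/T1) prefactor
decayed at half that rate. A minimal standalone sketch checking this for a two-level
system (NumPy/SciPy with column-stacking vectorization; not part of the patch):

    import numpy as np
    from scipy.linalg import expm

    T1 = 0.2e-6
    a = np.array([[0, 1], [0, 0]], dtype=complex)  # lowering operator
    L = np.sqrt(1 / T1) * a                        # collapse operator, fixed rate

    def spre(A):   # A @ rho in vectorized form
        return np.kron(np.eye(2), A)

    def spost(A):  # rho @ A in vectorized form
        return np.kron(A.T, np.eye(2))

    LdL = L.conj().T @ L
    D = spre(L) @ spost(L.conj().T) - 0.5 * (spre(LdL) + spost(LdL))

    rho0 = np.diag([0.0, 1.0]).astype(complex)     # start in |1>
    vec = expm(D * T1) @ rho0.reshape(-1, order="F")
    rho_t = vec.reshape(2, 2, order="F")
    print(rho_t[1, 1].real)  # ~ exp(-1) = 0.368, i.e. 1/e after one T1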
diff --git a/c3/libraries/propagation.py b/c3/libraries/propagation.py
index e711788a..2bb80d94 100644
--- a/c3/libraries/propagation.py
+++ b/c3/libraries/propagation.py
@@ -11,6 +11,7 @@
     tf_matmul_n,
     tf_spre,
     tf_spost,
+    Id_like,
 )
 
 unitary_provider = dict()
@@ -241,38 +242,10 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) -> Dict:
         Matrix representation of the gate.
     """
     signal = gen.generate_signals(instr)
-    # Why do I get 0.0 if I print gen.resolution here?! FR
-    ts = []
-    if model.controllability:
-        h0, hctrls = model.get_Hamiltonians()
-        signals = []
-        hks = []
-        for key in signal:
-            signals.append(signal[key]["values"])
-            ts = signal[key]["ts"]
-            hks.append(hctrls[key])
-        signals = tf.cast(signals, tf.complex128)
-        hks = tf.cast(hks, tf.complex128)
-    else:
-        h0 = model.get_Hamiltonian(signal)
-        ts_list = [sig["ts"][1:] for sig in signal.values()]
-        ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0))
-        hks = None
-        signals = None
-        if not np.all(
-            tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0])
-        ):
-            raise Exception("C3Error:Something with the times happend.")
-        if not np.all(
-            tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0])  # type: ignore
-        ):
-            raise Exception("C3Error:Something with the times happend.")
-
-    dt = ts[1] - ts[0]
-
-    batch_size = tf.constant(len(h0), tf.int32)
-
-    dUs = tf_batch_propagate(h0, hks, signals, dt, batch_size=batch_size)
+
+    dynamics_generators = model.get_dynamics_generators(signal)
+
+    dUs = tf.linalg.expm(dynamics_generators)
 
     # U = tf_matmul_left(tf.cast(dUs, tf.complex128))
     U = tf_matmul_n(dUs, folding_stack)
 
@@ -281,7 +254,172 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) -> Dict:
     if model.max_excitations:
         U = model.blowup_excitations(tf_matmul_left(tf.cast(dUs, tf.complex128)))
         dUs = tf.vectorized_map(model.blowup_excitations, dUs)
 
-    return {"U": U, "dUs": dUs, "ts": ts}
+    return {"U": U, "dUs": dUs}
+
+
+@unitary_deco
+def pwc_sequential(model: Model, gen: Generator, instr: Instruction) -> Dict:
+    """
+    Solve the equation of motion (Lindblad or Schrödinger) for a given control
+    signal and Hamiltonians, propagating one time slice at a time.
+
+    Parameters
+    ----------
+    model: Model
+        Model of the physical system.
+    gen: Generator
+        Signal generation chain.
+    instr: Instruction
+        Identifier for one of the gates.
+
+    Returns
+    -------
+    unitary
+        Matrix representation of the gate.
+    """
+    signal = gen.generate_signals(instr)
+    # get the number of time steps in the signal
+    n = tf.constant(len(list(list(signal.values())[0].values())[0]), dtype=tf.int32)
+
+    if model.lindbladian:
+        U = Id_like(model.get_Liouvillian(signal))
+    else:
+        U = Id_like(model.get_Hamiltonian(signal))
+
+    for i in range(n):
+        mini_signal: Dict[str, Dict] = {}
+        for key in signal.keys():
+            mini_signal[key] = {}
+            mini_signal[key]["values"] = tf.expand_dims(signal[key]["values"][i], 0)
+            # the ts are only used to compute dt, so two entries suffice
+            mini_signal[key]["ts"] = signal[key]["ts"][0:2]
+        dynamics_generators = model.get_dynamics_generators(mini_signal)
+
+        dU = tf.linalg.expm(dynamics_generators)
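+        # For piecewise-constant dynamics the propagator is the time-ordered
+        # product U = dU_{n-1} @ ... @ dU_1 @ dU_0, so each new slice multiplies
+        # from the left; this ordering reproduces the result of the original
+        # pwc function.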
+ """ + # In this function, there is a deliberate and clear distinction between tensorflow + # and non tensorflow datatypes to guide which parts are hardwired during tracing + # and which are not. This was necessary to allow for the tf.function decorator. + signal = gen.generate_signals(instr) + # get number of time steps in the signal. Since this should always be the same, + # it does not interfere with tf.function tracing despite its numpy data type + n_np = len(list(list(signal.values())[0].values())[0]) # non tensorflow datatype + + # batch_size is the number of operations happening sequentially, parallel is the number + # of operations happening in parallel. + # Their product is n or slightly bigger than n. n is the total number of operations. + batch_size_np = np.ceil(n_np / parallel) # non tensorflow datatype + batch_size = tf.constant(batch_size_np, dtype=tf.int32) # tensorflow datatype + + # i tried to set the Us to None at the beginning and have an if else condition + # that handles the first call, but tensorflow complained + + # edge case at the end must be handled outside the loop so that tensorflow can propperly + # trace the loop. I use this to simultaniously initialize the Us + mismatch = int( + n_np - np.floor(n_np / parallel) * parallel + ) # a modulo might also work + mini_init_signal: Dict[str, Dict] = {} + for key in signal.keys(): + mini_init_signal[key] = {} + # the signals pulled here are not in sequence, but that should not matter + # the multiplication of the relevant propagators is still ordered correctly + mini_init_signal[key]["values"] = signal[key]["values"][ + batch_size - 1 :: batch_size + ] + # this does nothing but reashure tensorflow of the shape of the tensor + mini_init_signal[key]["values"] = tf.reshape( + mini_init_signal[key]["values"], [mismatch] + ) + # the ts are only used to compute dt and therefore this works + mini_init_signal[key]["ts"] = signal[key]["ts"][0:2] + dynamics_generators = model.get_dynamics_generators(mini_init_signal) + Us = tf.linalg.expm(dynamics_generators) + # possibly correct shape + if Us.shape[1] is not parallel: + Us = tf.concat( + [ + Us, + tf.eye( + Us.shape[1], + batch_shape=[parallel - Us.shape[0]], + dtype=tf.complex128, + ), + ], + axis=0, + ) + + # since we had to start from the back to handle the final batch with possibly different length + # we need to continue backwards and reverse the order (see batch_size - i - 2) + # this is necessary because "reversed" cant be traced + for i in range(batch_size - 1): + mini_signal: Dict[str, Dict] = {} + for key in signal.keys(): + mini_signal[key] = {} + # the signals pulled here are not in sequence, but that should not matter + # the multiplication of the relevant propagators is still ordered correctly + mini_signal[key]["values"] = signal[key]["values"][ + (batch_size - i - 2) :: batch_size + ] + # this does nothing but reashure tensorflow of the shape of the tensor + mini_signal[key]["values"] = tf.reshape( + mini_signal[key]["values"], [parallel] + ) + # the ts are only used to compute dt and therefore this works + mini_signal[key]["ts"] = signal[key]["ts"][0:2] + dynamics_generators = model.get_dynamics_generators(mini_signal) + + dUs = tf.linalg.expm(dynamics_generators) + # i made shure that this order produces the same result as the original pwc function + # though it is reversed just like the for loop is reversed + Us = Us @ dUs + + # The Us are partially accumulated propagators, multiplying them together + # yields the final propagator. 
+    # The Us are partially accumulated propagators; multiplying them together
+    # yields the final propagator. They serve a similar function as the dUs,
+    # but there are typically fewer of them.
+    # Here the multiplication order is flipped compared to the Us above:
+    # while each individual propagator in Us was accumulated in reverse,
+    # the resulting Us are still ordered advancing in time.
+    U = tf_matmul_left(tf.cast(Us, tf.complex128))
+
+    if model.max_excitations:
+        U = model.blowup_excitations(tf_matmul_left(tf.cast(Us, tf.complex128)))
+        Us = tf.vectorized_map(model.blowup_excitations, Us)
+
+    return {"U": U, "dUs": Us}
 
 
 ####################
@@ -400,52 +538,38 @@ def pwc_trott_drift(h0, hks, cflds_t, dt):
     return dUs
 
 
-def tf_batch_propagate(hamiltonian, hks, signals, dt, batch_size):
+def tf_batch_propagate(dyn_gens, batch_size):
     """
     Propagate signal in batches
 
     Parameters
     ----------
-    hamiltonian: tf.tensor
-        Drift Hamiltonian
-    hks: Union[tf.tensor, List[tf.tensor]]
-        List of control hamiltonians
-    signals: Union[tf.tensor, List[tf.tensor]]
-        List of control signals, one per control hamiltonian
-    dt: float
-        Length of one time slice
+    dyn_gens: tf.tensor
+        i) -1j * Hamiltonian(t) * dt
+        or
+        ii) Liouville superoperator * dt
     batch_size: int
         Number of elements in one batch
 
     Returns
    -------
-
+    List of partial propagators
+        i) as operators
+        ii) as superoperators
     """
-    if signals is not None:
-        batches = int(tf.math.ceil(signals.shape[0] / batch_size))
-        batch_array = tf.TensorArray(
-            signals.dtype, size=batches, dynamic_size=False, infer_shape=False
-        )
-        for i in range(batches):
-            batch_array = batch_array.write(
-                i, signals[i * batch_size : i * batch_size + batch_size]
-            )
-    else:
-        batches = int(tf.math.ceil(hamiltonian.shape[0] / batch_size))
-        batch_array = tf.TensorArray(
-            hamiltonian.dtype, size=batches, dynamic_size=False, infer_shape=False
+
+    batches = int(tf.math.ceil(dyn_gens.shape[0] / batch_size))
+    batch_array = tf.TensorArray(
+        dyn_gens.dtype, size=batches, dynamic_size=False, infer_shape=False
+    )
+    for i in range(batches):
+        batch_array = batch_array.write(
+            i, dyn_gens[i * batch_size : i * batch_size + batch_size]
         )
-        for i in range(batches):
-            batch_array = batch_array.write(
-                i, hamiltonian[i * batch_size : i * batch_size + batch_size]
-            )
 
     dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False)
     for i in range(batches):
         x = batch_array.read(i)
-        if signals is not None:
-            result = tf_propagation_vectorized(hamiltonian, hks, x, dt)
-        else:
-            result = tf_propagation_vectorized(x, None, None, dt)
+        result = tf.linalg.expm(tf.cast(x, dtype=tf.complex128))
         dUs_array = dUs_array.write(i, result)
     return dUs_array.concat()
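The core simplification in propagation.py is that a whole batch of dynamics
generators G_k = -1j * H_k * dt (or L_k * dt in the Lindblad case) is exponentiated
in a single call, since tf.linalg.expm operates on stacked matrices. A standalone
sketch of the pattern (illustrative values; not part of the patch):

    import tensorflow as tf

    # ten random Hermitian 3x3 Hamiltonians and a fixed time step
    x = tf.complex(
        tf.random.normal([10, 3, 3], dtype=tf.float64),
        tf.random.normal([10, 3, 3], dtype=tf.float64),
    )
    H = 0.5 * (x + tf.linalg.adjoint(x))  # Hermitian H_k per slice
    dt = tf.constant(1e-2, dtype=tf.complex128)

    dyn_gens = -1j * H * dt               # shape [10, 3, 3], one generator per slice
    dUs = tf.linalg.expm(dyn_gens)        # batched matrix exponential

    U = dUs[0]                            # time-ordered product, latest slice on the left
    for dU in dUs[1:]:
        U = dU @ U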
diff --git a/c3/model.py b/c3/model.py
index 432bc2b8..94fbd0bd 100755
--- a/c3/model.py
+++ b/c3/model.py
@@ -51,8 +51,8 @@ def __init__(self, subsystems=None, couplings=None, tasks=None, max_excitations=
         self.tasks: dict = dict()
         self.drift_ham = None
         self.dressed_drift_ham = None
-        self.__hamiltonians = None
-        self.__dressed_hamiltonians = None
+        self._hamiltonians = None
+        self._dressed_hamiltonians = None
         if subsystems:
             self.set_components(subsystems, couplings, max_excitations)
         if tasks:
@@ -338,21 +338,26 @@ def get_sparse_Hamiltonians(self):
         sparse_controls = tf.vectorized_map(self.blowup_excitations, controls)
         return sparse_drift, sparse_controls
 
-    def get_Hamiltonian(self, signal=None):
+    def get_Hamiltonian(self, signal: Dict):
         """Get a hamiltonian with an optional signal. This will return an
         hamiltonian over time. Can be used e.g. for tuning the frequency of a
         transmon, where the control hamiltonian is not easily accessible. If
         max.excitation is non-zero the resulting Hamiltonian is cut accordingly"""
-        if signal is None:
+        sgnl = None
+        for sgn in signal.values():
+            if "values" in sgn:
+                sgnl = sgn["values"]
+
+        if sgnl is None:
             if self.dressed:
                 signal_hamiltonian = self.dressed_drift_ham
             else:
                 signal_hamiltonian = self.drift_ham
         else:
             if self.dressed:
-                hamiltonians = copy.deepcopy(self.__dressed_hamiltonians)
+                hamiltonians = copy.deepcopy(self._dressed_hamiltonians)
                 transform = self.transform
             else:
-                hamiltonians = copy.deepcopy(self.__hamiltonians)
+                hamiltonians = copy.deepcopy(self._hamiltonians)
                 transform = None
             for key, sig in signal.items():
                 if key in self.subsystems:
@@ -378,7 +383,70 @@
 
         return signal_hamiltonian
 
-    def get_sparse_Hamiltonian(self, signal=None):
+    def get_Liouvillian(self, signal):
+        h = self.get_Hamiltonian(signal)
+        col_ops = self.get_Lindbladians()
+        if self.max_excitations:
+            col_ops = self.cut_excitations(col_ops)
+
+        coher_superop = -1j * (tf_utils.tf_spre(h) - tf_utils.tf_spost(h))
+        liouvillian = coher_superop
+        for col_op in col_ops:
+            super_clp = tf.matmul(
+                tf_utils.tf_spre(col_op),
+                tf_utils.tf_spost(tf.linalg.adjoint(col_op)),
+            )
+            anticomm_L_clp = 0.5 * tf.matmul(
+                tf_utils.tf_spre(tf.linalg.adjoint(col_op)),
+                tf_utils.tf_spre(col_op),
+            )
+            anticomm_R_clp = 0.5 * tf.matmul(
+                tf_utils.tf_spost(col_op),
+                tf_utils.tf_spost(tf.linalg.adjoint(col_op)),
+            )
+            # sum the dissipators of all collapse operators
+            liouvillian = liouvillian + super_clp - anticomm_L_clp - anticomm_R_clp
+        return liouvillian
+
+    def get_dynamics_generators(self, signal: Dict):
+        """Return the generators of the dynamics per time slice, ready for
+        matrix exponentiation: -1j * H * dt if model.lindbladian is False,
+        otherwise the Liouvillian superoperators of the Lindblad master
+        equation multiplied by dt.
+        """
+        if self.lindbladian:
+            dyn_gens = self.get_Liouvillian(signal)
+        else:
+            dyn_gens = -1j * self.get_Hamiltonian(signal)
+
+        times = self.get_ts_dt(signal)
+        dt = times["dt"]
+
+        return dyn_gens * dt
+
+    def get_ts_dt(self, signal):
+        """
+        Given a signal, return a Dict with the time slices ts and the time
+        increment dt.
+        """
+        ts_list = [sig["ts"][:] for sig in signal.values()]
+        ts = tf.math.reduce_mean(ts_list, axis=0)
+        # Only do the safety check outside of graph mode, for performance
+        # reasons. In graph mode, the safety check is still executed once
+        # during tracing.
+        if tf.executing_eagerly() and not tf.reduce_all(
+            tf.math.reduce_variance(ts_list, axis=0) < (1e-5 * (ts[1] - ts[0]))
+        ):
+            raise Exception("C3Error: Inconsistent time grids in the signal.")
+        if tf.executing_eagerly() and not tf.reduce_all(
+            tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0])  # type: ignore
+        ):
+            raise Exception("C3Error: Non-uniform time steps in the signal.")
+        dt = tf.cast(ts[1] - ts[0], dtype=tf.complex128)
+
+        return {"ts": ts, "dt": dt}
+
+    def get_sparse_Hamiltonian(self, signal):
         return self.blowup_excitations(self.get_Hamiltonian(signal))
 
     def get_Lindbladians(self):
@@ -408,7 +476,7 @@ def update_Hamiltonians(self):
 
         self.drift_ham = sum(hamiltonians.values())
         self.control_hams = control_hams
-        self.__hamiltonians = hamiltonians
+        self._hamiltonians = hamiltonians
 
     def update_Lindbladians(self):
         """Return Lindbladian operators and their prefactors."""
@@ -466,7 +534,7 @@ def update_dressed(self, ordered=True):
         dressed_control_hams = {}
         dressed_col_ops = []
         dressed_hamiltonians = dict()
-        for k, h in self.__hamiltonians.items():
+        for k, h in self._hamiltonians.items():
             dressed_hamiltonians[k] = tf.matmul(
                 tf.matmul(tf.linalg.adjoint(self.transform), h), self.transform
             )
@@ -480,7 +548,7 @@ def update_dressed(self, ordered=True):
             )
         self.dressed_drift_ham = dressed_drift_ham
         self.dressed_control_hams = dressed_control_hams
-        self.__dressed_hamiltonians = dressed_hamiltonians
+        self._dressed_hamiltonians = dressed_hamiltonians
         if self.lindbladian:
             for col_op in self.col_ops:
                 dressed_col_ops.append(
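get_dynamics_generators thus returns expm-ready generators: for a signal on a
uniform time grid ts, dt = ts[1] - ts[0] and each slice contributes
expm(-1j * H_k * dt), or expm(L_k * dt) for open systems. A quick check of that
contract, mirroring the new test in test/test_model.py further down (assumes the
`model` and `hdrift` fixtures of that test; illustrative only):

    import numpy as np

    ts = np.linspace(0, 5e-9, 10)
    dt = ts[1] - ts[0]                    # 5e-9 / 9
    sig = {"d1": {"ts": ts}}              # no "values": drift Hamiltonian only
    dyn_gens = model.get_dynamics_generators(sig)
    np.testing.assert_allclose(dyn_gens, (-1j * dt) * hdrift)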
diff --git a/c3/optimizers/modellearning.py b/c3/optimizers/modellearning.py
index 809657e2..e2bdc98a 100755
--- a/c3/optimizers/modellearning.py
+++ b/c3/optimizers/modellearning.py
@@ -19,6 +19,8 @@
     neg_loglkh_multinom_norm,
 )
 
+from typing import Any, Optional
+
 
 class ModelLearning(Optimizer):
     """
@@ -86,7 +88,7 @@ def __init__(
 
         super().__init__(pmap=pmap, algorithm=algorithm, logger=logger)
 
-        self.state_labels = {"all": None}
+        self.state_labels: Dict[str, Optional[list]] = {"all": None}
         for target, labels in state_labels.items():
             self.state_labels[target] = [tuple(lab) for lab in labels]
 
@@ -106,7 +108,7 @@ def __init__(
         self.inverse = False
         self.options = options
 
-        self.learn_data = {}
+        self.learn_data: Dict[Any, Any] = {}
         self.read_data(datafiles)
         self.sampling = sampling
         self.batch_sizes = batch_sizes
diff --git a/c3/optimizers/optimalcontrol.py b/c3/optimizers/optimalcontrol.py
index 44a8b706..382da512 100755
--- a/c3/optimizers/optimalcontrol.py
+++ b/c3/optimizers/optimalcontrol.py
@@ -156,7 +156,7 @@ def optimize_controls(self, setup_log: bool = True) -> None:
             self.load_best(self.logdir + "best_point_" + self.logname)
         self.end_log()
 
-    @tf.function
+    @tf.function(autograph=True)
     def goal_run(self, current_params: tf.Tensor) -> tf.float64:
         """
        Evaluate the goal function for current parameters.
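The example and tests that follow wire T1 and pure dephasing T2* into the qubit.
For reference, the total dephasing time used in test_t2 combines both channels as
1/T2 = 1/(2*T1) + 1/T2*; with the values below (T1 = 0.20 us, T2* = 0.39 us) this
gives T2 = 1/(2.5e6 + 2.564e6) s, roughly 0.197 us. A quick check of that
arithmetic (standalone; values copied from the test):

    qubit_t1 = 0.20e-6
    qubit_t2star = 0.39e-6
    t2 = ((2 * qubit_t1) ** (-1) + qubit_t2star ** (-1)) ** (-1)
    print(t2)  # ~ 1.975e-07 s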
diff --git a/examples/single_qubit_blackbox_exp.py b/examples/single_qubit_blackbox_exp.py
index 145f951f..8f814474 100644
--- a/examples/single_qubit_blackbox_exp.py
+++ b/examples/single_qubit_blackbox_exp.py
@@ -15,7 +15,7 @@
 
 def create_experiment():
-    lindblad = False
+    lindblad = True
     dressed = True
     qubit_lvls = 3
     freq = 5e9
@@ -27,6 +27,8 @@ def create_experiment():
     awg_res = 2e9
     sideband = 50e6
     lo_freq = 5e9 + sideband
+    qubit_t1 = 0.20e-6
+    qubit_t2star = 0.39e-6
 
     # ### MAKE MODEL
     q1 = chip.Qubit(
@@ -46,6 +48,8 @@ def create_experiment():
         ),
         hilbert_dim=qubit_lvls,
         temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit="K"),
+        t1=Qty(value=qubit_t1, min_val=0.001e-6, max_val=90e-6, unit="s"),
+        t2star=Qty(value=qubit_t2star, min_val=0.0010e-6, max_val=90e-6, unit="s"),
     )
 
     drive = chip.Drive(
diff --git a/test/test_lindblad.py b/test/test_lindblad.py
new file mode 100644
index 00000000..627c5b13
--- /dev/null
+++ b/test/test_lindblad.py
@@ -0,0 +1,210 @@
+"""
+Testing module for Lindblad propagation
+"""
+
+import pytest
+import copy
+import numpy as np
+from c3.model import Model as Mdl
+from c3.c3objs import Quantity as Qty
+from c3.parametermap import ParameterMap as PMap
+from c3.experiment import Experiment as Exp
+from c3.generator.generator import Generator as Gnr
+import c3.signal.gates as gates
+import c3.libraries.chip as chip
+import c3.generator.devices as devices
+import c3.libraries.hamiltonians as hamiltonians
+import c3.signal.pulse as pulse
+import c3.libraries.envelopes as envelopes
+import c3.libraries.tasks as tasks
+import c3.utils.tf_utils as tf_utils
+
+lindblad = True
+dressed = True
+qubit_lvls = 2
+freq = 5e9
+anhar = -210e6
+init_temp = 0
+qubit_temp = 1e-10
+t_final = 12e-9
+sim_res = 100e9
+awg_res = 2e9
+lo_freq = freq
+qubit_t1 = 0.20e-6
+qubit_t2star = 0.39e-6
+
+# ### MAKE MODEL
+q1 = chip.Qubit(
+    name="Q1",
+    desc="Qubit 1",
+    freq=Qty(
+        value=freq,
+        min_val=4.995e9,
+        max_val=5.005e9,
+        unit="Hz 2pi",
+    ),
+    anhar=Qty(
+        value=anhar,
+        min_val=-380e6,
+        max_val=-120e6,
+        unit="Hz 2pi",
+    ),
+    hilbert_dim=qubit_lvls,
+    temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit="K"),
+    t1=Qty(value=qubit_t1, min_val=0.001e-6, max_val=90e-6, unit="s"),
+    t2star=Qty(value=qubit_t2star, min_val=0.001e-6, max_val=90e-6, unit="s"),
+)
+
+drive = chip.Drive(
+    name="d1",
+    desc="Drive 1",
+    comment="Drive line 1 on qubit 1",
+    connected=["Q1"],
+    hamiltonian_func=hamiltonians.x_drive,
+)
+phys_components = [q1]
+line_components = [drive]
+
+init_ground = tasks.InitialiseGround(
+    init_temp=Qty(value=init_temp, min_val=-0.001, max_val=0.22, unit="K")
+)
+task_list = [init_ground]
+model = Mdl(phys_components, line_components, task_list)
+model.set_lindbladian(lindblad)
+model.set_dressed(dressed)
+
+# ### MAKE GENERATOR
+
+generator = Gnr(
+    devices={
+        "LO": devices.LO(name="lo", resolution=sim_res, outputs=1),
+        "AWG": devices.AWG(name="awg", resolution=awg_res, outputs=1),
+        "DigitalToAnalog": devices.DigitalToAnalog(
+            name="dac", resolution=sim_res, inputs=1, outputs=1
+        ),
+        "Mixer": devices.Mixer(name="mixer", inputs=2, outputs=1),
+        "VoltsToHertz": devices.VoltsToHertz(
+            name="v_to_hz",
+            V_to_Hz=Qty(value=1e9, min_val=0.9e9, max_val=1.1e9, unit="Hz/V"),
+            inputs=1,
+            outputs=1,
+        ),
+    },
+)
+
+generator.set_chains(
+    {
+        "d1": {
+            "LO": [],
+            "AWG": [],
+            "DigitalToAnalog": ["AWG"],
+            "Mixer": ["LO", "DigitalToAnalog"],
+            "VoltsToHertz": ["Mixer"],
+        }
+    }
+)
+
+
+gauss_params_single = {
+    "amp": Qty(value=0.440936, min_val=0.01, max_val=0.99, unit="V"),
+    "t_final": Qty(
+        value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit="s"
+    ),
+    "sigma": Qty(value=t_final / 4, min_val=t_final / 8, max_val=t_final / 2, unit="s"),
+    "xy_angle": Qty(
+        value=747.256e-6, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit="rad"
+    ),
+    "freq_offset": Qty(
+        value=145.022e3,
+        min_val=-10 * 1e6,
+        max_val=10 * 1e6,
+        unit="Hz 2pi",
+    ),
+}
+
+nodrive_env = pulse.EnvelopeDrag(
+    name="no_drive",
+    params={
+        "t_final": Qty(
+            value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit="s"
+        )
+    },
+    shape=envelopes.no_drive,
+)
+carrier_parameters = {
+    "freq": Qty(
+        value=lo_freq,
+        min_val=4.5e9,
+        max_val=6e9,
+        unit="Hz 2pi",
+    ),
+    "framechange": Qty(value=0.001493, min_val=-np.pi, max_val=3 * np.pi, unit="rad"),
+}
+carr = pulse.Carrier(
+    name="carrier",
+    desc="Frequency of the local oscillator",
+    params=carrier_parameters,
+)
+
+
+QId = gates.Instruction(
+    name="id", t_start=0.0, t_end=t_final, channels=["d1"], targets=[0]
+)
+
+QId.add_component(nodrive_env, "d1")
+QId.add_component(copy.deepcopy(carr), "d1")
+QId.comps["d1"]["carrier"].params["framechange"].set_value((t_final) % (2 * np.pi))
+
+
+parameter_map = PMap(instructions=[QId], model=model, generator=generator)
+
+exp = Exp(pmap=parameter_map)
+
+init_dm = np.zeros([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
+init_dm[1][1] = 1
+init_vec = tf_utils.tf_dm_to_vec(init_dm)
+seq = ["id[0]"]
+exp.set_opt_gates(seq)
+
+
+@pytest.mark.unit
+def test_dissipation() -> None:
+    """Test the dissipative nature of the Lindblad evolution."""
+
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(U, init_vec)
+    final_pops = exp.populations(final_vec, model.lindbladian)
+    assert final_pops[1] < 1
+
+
+@pytest.mark.unit
+def test_t1() -> None:
+    """Test that after one T1 the initial |1> population has decayed below 1/e."""
+    n = int(qubit_t1 / t_final)
+    n += 1
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(np.linalg.matrix_power(U, n), init_vec)
+    final_pops = exp.populations(final_vec, model.lindbladian)
+    assert final_pops[1] < (1 / np.exp(1))
+
+
+init_dm = 0.5 * np.ones([parameter_map.model.tot_dim, parameter_map.model.tot_dim])
+init_vec = tf_utils.tf_dm_to_vec(init_dm)
+
+
+@pytest.mark.unit
+def test_t2() -> None:
+    """Test that after one T2 the initial coherences have decayed below 1/e."""
+    t2 = ((2 * qubit_t1) ** (-1) + (qubit_t2star) ** (-1)) ** (-1)
+    n = int(t2 / t_final)
+    n += 1
+    exp.set_prop_method("pwc")
+    unitaries = exp.compute_propagators()
+    U = np.array(unitaries["id[0]"])
+    final_vec = np.dot(np.linalg.matrix_power(U, n), init_vec)
+    final_dm = tf_utils.tf_vec_to_dm(final_vec)
+    assert np.abs(final_dm[0][1]) < (1 / (2 * np.exp(1)))
diff --git a/test/test_model.py b/test/test_model.py
index d611e4ca..c2114ecb 100644
--- a/test/test_model.py
+++ b/test/test_model.py
@@ -128,7 +128,8 @@ def test_model_couplings() -> None:
 
 @pytest.mark.unit
 def test_model_get_hamiltonian() -> None:
-    ham = model.get_Hamiltonian()
+    sig = {"d1": {"ts": np.linspace(0, 5e-9, 10)}}
+    ham = model.get_Hamiltonian(sig)
     np.testing.assert_allclose(ham, hdrift)
 
     sig = {"d1": {"ts": np.linspace(0, 5e-9, 10), "values": np.linspace(0e9, 20e9, 10)}}
@@ -136,6 +137,28 @@ def test_model_get_hamiltonian() -> None:
     np.testing.assert_allclose(hams, test_data["sliced_hamiltonians"])
 
 
+@pytest.mark.unit
+def test_model_get_dynamics_generators() -> None:
+    sig = {"d1": {"ts": np.linspace(0, 5e-9, 10)}}
+    dt = 5e-9 / 9
+    ham = model.get_dynamics_generators(sig)
+    np.testing.assert_allclose(ham, (-1j * dt) * hdrift)
+
+    sig = {"d1": {"ts": np.linspace(0, 5e-9, 10), "values": np.linspace(0e9, 20e9, 10)}}
+    hams = model.get_dynamics_generators(sig)
+    np.testing.assert_allclose(hams, (-1j * dt) * test_data["sliced_hamiltonians"])
+
+
+@pytest.mark.unit
+def test_model_get_ts_dt() -> None:
+    ts = np.linspace(0, 5e-9, 10)
+    sig = {"d1": {"ts": ts}}
+    dt = 5e-9 / 9
+    times = model.get_ts_dt(sig)
+    np.testing.assert_allclose(times["dt"], dt)
+    np.testing.assert_allclose(times["ts"], ts)
+
+
 @pytest.mark.unit
 def test_get_qubit_frequency() -> None:
     np.testing.assert_allclose(
diff --git a/test/test_noise.py b/test/test_noise.py
index fcef3d3f..26f2e171 100644
--- a/test/test_noise.py
+++ b/test/test_noise.py
@@ -116,7 +116,7 @@ def test_noise_devices():
         assert np.std(pink_noiseA) >= 0.05 * params[0]
         assert np.std(pink_noiseA) < 10 * params[0] + 1e-15
         if params[0] > 1e-15:
-            assert np.median(np.abs(pink_noiseA - pink_noiseB) > 1e-10)
+            assert np.median(np.abs(pink_noiseA - pink_noiseB)) > 1e-10
 
         if params[1] > 1e-15:
             assert np.abs(np.mean(dc_noiseA - dc_noiseB)) > 1e-6
@@ -128,7 +128,7 @@ def test_noise_devices():
         assert np.std(awg_noiseA) >= 0.05 * params[2]
         assert np.std(awg_noiseA) < 10 * params[2] + 1e-15
         if params[2] > 1e-15:
-            assert np.mean(np.abs(awg_noiseA - awg_noiseB) > 1e-10)
+            assert np.mean(np.abs(awg_noiseA - awg_noiseB)) > 1e-10
 
     if np.max(params) > 0:
         assert fidelityA != fidelityB
diff --git a/test/test_quantity.py b/test/test_quantity.py
index 2317b3fe..b2993598 100644
--- a/test/test_quantity.py
+++ b/test/test_quantity.py
@@ -4,6 +4,7 @@
 import numpy as np
 import pytest
 from c3.c3objs import Quantity, hjson_encode
+import tensorflow as tf
 
 amp = Quantity(value=0.0, min_val=-1.0, max_val=+1.0, unit="V")
 amp_dict = {
@@ -157,10 +158,11 @@ def test_qty_math() -> None:
     assert a * b == 1.0
     assert b * a == 1.0
     np.testing.assert_allclose(a**b, 0.25)
-    assert b**a == 2**0.5
+    np.testing.assert_allclose(b**a, 2**0.5)
     np.testing.assert_allclose(a / b, 0.25)
     assert b / a == 4.0
-    assert b % a == 0
+    with tf.device("/cpu:0"):
+        assert b % a == 0
 
     qty = Quantity(3, min_val=0, max_val=5)
     qty.subtract(1.3)
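For completeness, a sketch of how the propagation variants added above are selected
on an Experiment (following the dispatch in compute_propagators; assumes an
already-built Experiment `exp` like the one in test/test_lindblad.py, and note that
the `parallel` attribute is read via hasattr, so it is set directly on the object):

    exp.set_prop_method("pwc")                      # default, stores full dUs
    exp.compute_propagators()

    exp.set_prop_method("pwc_sequential")           # lowest memory, no dUs stored
    exp.compute_propagators()

    exp.set_prop_method("pwc_sequential_parallel")  # chunked; dUs are chain products
    exp.parallel = 16                               # number of parallel chains
    exp.compute_propagators()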