From ca5607dc72fb1037e8fd668145ff7b7f9d1222bf Mon Sep 17 00:00:00 2001
From: waleeattia
Date: Wed, 8 Dec 2021 17:59:16 -0500
Subject: [PATCH 1/9] adding losses.py

---
 benchmarks/cifar_exp/losses.py | 68 ++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
 create mode 100644 benchmarks/cifar_exp/losses.py

diff --git a/benchmarks/cifar_exp/losses.py b/benchmarks/cifar_exp/losses.py
new file mode 100644
index 0000000000..f85a63e1e5
--- /dev/null
+++ b/benchmarks/cifar_exp/losses.py
@@ -0,0 +1,68 @@
+import math
+import tensorflow as tf
+from tensorflow.keras import backend as K
+import tensorflow_addons as tfa
+
+def logDiff(yTrue,yPred):
+    return K.sum(K.log(yTrue) - K.log(yPred))
+
+# sum over samples in batch (anchors) ->
+# average over similar samples (positive) ->
+# of - log softmax positive / sum negatives (wrt cos similarity)
+# i.e. \sum_i -1/|P(i)| \sum_{p \in P(i)} log [exp(z_i @ z_p / t) / \sum_{n \in N(i)} exp(z_i @ z_n / t)]
+# = \sum_i [log[\sum_{n \in N(i)} exp(z_i @ z_n / t)] - 1/|P(i)| \sum_{p \in P(i)} log [exp(z_i @ z_p / t)]]
+def supervised_contrastive_loss(yTrue, yPred):
+    print(yTrue)
+    print(yPred)
+    temp=0.1
+    #r = tf.reshape(yPred,(32,10))
+    #y = tf.reshape(yTrue,(32,10))
+    r = yPred
+    y = yTrue
+
+    r, _ = tf.linalg.normalize(r, axis=1)
+    r_dists = tf.matmul(r, tf.transpose(r))
+    print(r_dists.shape)
+    '''
+    for i in range(len(r_dists)):
+        r_dists[i] = 0
+    '''
+    #r_dists = tf.linalg.set_diag(r_dists, tf.zeros(r_dists.shape[0], dtype=r_dists.dtype)) # exclude itself distance
+    r_dists = r_dists / temp
+    y_norms = tf.reduce_sum(y * y, 1)
+    y = y_norms - 2 * tf.matmul(y, tf.transpose(y)) + tf.transpose(y_norms)
+
+    y = tf.cast(y / 2, r_dists.dtype) # scale onehot distances to 0 and 1
+    negative_sum = tf.math.log(tf.reduce_sum(y * tf.exp(r_dists), axis=1)) # y zeros diagonal 1's
+    positive_sum = (1 - y) * r_dists
+
+    n_nonzero = tf.math.reduce_sum(1-y, axis=1) - 1 # Subtract diagonal
+    positive_sum = tf.reduce_sum(positive_sum, axis=1) / tf.cast(n_nonzero, positive_sum.dtype)
+    loss = tf.reduce_sum(negative_sum - positive_sum)
+    return loss
+
+# siamese networks version
+def contrastiveLoss(yTrue,yPred):
+    print(1-yPred)
+    # make sure the datatypes are the same
+    yTrue = tf.cast(yTrue, yPred.dtype)
+    squaredPreds = K.square(yPred)
+    squaredMargin = K.square(K.maximum(1-yPred, 0))
+    loss = K.mean(yTrue * squaredPreds + (1-yTrue) * squaredMargin)
+    print(loss, type(loss))
+    return loss
+
+def cosSimilarity(vec1, vec2):
+    print(vec1, vec2)
+    sim = tf.reduce_sum(tf.reduce_sum(tf.multiply(vec1, vec2)))
+    print(sim.shape)
+    return sim
+
+def SupervisedContrastiveLoss(yTrue, yPred):
+    temp=0.1
+    r = yPred
+    y = yTrue
+    r, _ = tf.linalg.normalize(r, axis=1)
+    r_dists = tf.matmul(r, tf.transpose(r))
+    logits = tf.divide(r_dists, temp)
+    return tfa.losses.npairs_loss(tf.squeeze(tf.reduce_sum(y * y, 1)), logits)
\ No newline at end of file

From 196ca94790cea4dcb1100a04c0ab4862f41ad3e4 Mon Sep 17 00:00:00 2001
From: waleeattia
Date: Wed, 8 Dec 2021 18:01:22 -0500
Subject: [PATCH 2/9] updating fte_bte_exp.py with supervised contrastive loss

---
 benchmarks/cifar_exp/fte_bte_exp.py | 477 +++++++++++-----------------
 1 file changed, 178 insertions(+), 299 deletions(-)

diff --git a/benchmarks/cifar_exp/fte_bte_exp.py b/benchmarks/cifar_exp/fte_bte_exp.py
index 975918cf59..2377140cc5 100644
--- a/benchmarks/cifar_exp/fte_bte_exp.py
+++ b/benchmarks/cifar_exp/fte_bte_exp.py
@@ -1,12 +1,13 @@
 #%%
 import random
-import matplotlib.pyplot as plt
 import tensorflow as tf
-import 
keras -from keras import layers +from tensorflow import keras +from tensorflow.keras import layers from itertools import product import pandas as pd +from losses import SupervisedContrastiveLoss # adapted version of SupConLoss for ftebte setting, uses cosine similarity matrix + import numpy as np import pickle @@ -16,41 +17,47 @@ from joblib import Parallel, delayed from multiprocessing import Pool +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.callbacks import EarlyStopping from proglearn.progressive_learner import ProgressiveLearner from proglearn.deciders import SimpleArgmaxAverage -from proglearn.transformers import ( - NeuralClassificationTransformer, - TreeClassificationTransformer, -) +from proglearn.transformers import NeuralClassificationTransformer, TreeClassificationTransformer from proglearn.voters import TreeClassificationVoter, KNNClassificationVoter -import tensorflow as tf - import time +import sys #%% def unpickle(file): - with open(file, "rb") as fo: - dict = pickle.load(fo, encoding="bytes") + with open(file, 'rb') as fo: + dict = pickle.load(fo, encoding='bytes') return dict +def get_size(obj, seen=None): + """Recursively finds size of objects""" + size = sys.getsizeof(obj) + if seen is None: + seen = set() + obj_id = id(obj) + if obj_id in seen: + return 0 + # Important mark as seen *before* entering recursion to gracefully handle + # self-referential objects + seen.add(obj_id) + if isinstance(obj, dict): + size += sum([get_size(v, seen) for v in obj.values()]) + size += sum([get_size(k, seen) for k in obj.keys()]) + elif hasattr(obj, '__dict__'): + size += get_size(obj.__dict__, seen) + '''elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)): + size += sum([get_size(i, seen) for i in obj])''' + return size #%% -def LF_experiment( - train_x, - train_y, - test_x, - test_y, - ntrees, - shift, - slot, - model, - num_points_per_task, - acorn=None, -): +def LF_experiment(train_x, train_y, test_x, test_y, ntrees, shift, slot, model, num_points_per_task, acorn=None): df = pd.DataFrame() - single_task_accuracies = np.zeros(10, dtype=float) + single_task_accuracies = np.zeros(10,dtype=float) shifts = [] tasks = [] base_tasks = [] @@ -58,191 +65,154 @@ def LF_experiment( train_times_across_tasks = [] single_task_inference_times_across_tasks = [] multitask_inference_times_across_tasks = [] + time_info = [] + mem_info = [] if model == "dnn": default_transformer_class = NeuralClassificationTransformer network = keras.Sequential() - network.add( - layers.Conv2D( - filters=16, - kernel_size=(3, 3), - activation="relu", - input_shape=np.shape(train_x)[1:], - ) - ) + network.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=np.shape(train_x)[1:])) network.add(layers.BatchNormalization()) - network.add( - layers.Conv2D( - filters=32, - kernel_size=(3, 3), - strides=2, - padding="same", - activation="relu", - ) - ) + network.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu')) network.add(layers.BatchNormalization()) - network.add( - layers.Conv2D( - filters=64, - kernel_size=(3, 3), - strides=2, - padding="same", - activation="relu", - ) - ) + network.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu')) network.add(layers.BatchNormalization()) - network.add( - layers.Conv2D( - filters=128, - kernel_size=(3, 3), - strides=2, - padding="same", - activation="relu", - ) - ) + network.add(layers.Conv2D(filters=128, 
kernel_size=(3, 3), strides = 2, padding = "same", activation='relu')) network.add(layers.BatchNormalization()) - network.add( - layers.Conv2D( - filters=254, - kernel_size=(3, 3), - strides=2, - padding="same", - activation="relu", - ) - ) + network.add(layers.Conv2D(filters=254, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu')) network.add(layers.Flatten()) network.add(layers.BatchNormalization()) - network.add(layers.Dense(2000, activation="relu")) + network.add(layers.Dense(2000, activation='relu')) network.add(layers.BatchNormalization()) - network.add(layers.Dense(2000, activation="relu")) + network.add(layers.Dense(2000, activation='relu')) network.add(layers.BatchNormalization()) - network.add(layers.Dense(units=10, activation="softmax")) + network.add(layers.Dense(units=10, activation = 'softmax')) default_transformer_kwargs = { "network": network, "euclidean_layer_idx": -2, - "num_classes": 10, - "optimizer": keras.optimizers.Adam(3e-4), + "loss": SupervisedContrastiveLoss, + "optimizer": Adam(3e-4), + "fit_kwargs": { + "epochs": 100, + "callbacks": [EarlyStopping(patience=5, monitor="val_loss")], + "verbose": False, + "validation_split": 0.33, + "batch_size": 32, + }, } - default_voter_class = KNNClassificationVoter - default_voter_kwargs = {"k": int(np.log2(num_points_per_task))} + default_voter_kwargs = {"k" : int(np.log2(num_points_per_task))} default_decider_class = SimpleArgmaxAverage elif model == "uf": default_transformer_class = TreeClassificationTransformer - default_transformer_kwargs = { - "kwargs": {"max_depth": 30, "max_features": "auto"} - } + default_transformer_kwargs = {"kwargs" : {"max_depth" : 30, "max_features" : "auto"}} default_voter_class = TreeClassificationVoter default_voter_kwargs = {} default_decider_class = SimpleArgmaxAverage - progressive_learner = ProgressiveLearner( - default_transformer_class=default_transformer_class, - default_transformer_kwargs=default_transformer_kwargs, - default_voter_class=default_voter_class, - default_voter_kwargs=default_voter_kwargs, - default_decider_class=default_decider_class, - ) + + progressive_learner = ProgressiveLearner(default_transformer_class = default_transformer_class, + default_transformer_kwargs = default_transformer_kwargs, + default_voter_class = default_voter_class, + default_voter_kwargs = default_voter_kwargs, + default_decider_class = default_decider_class) for task_ii in range(10): print("Starting Task {} For Fold {}".format(task_ii, shift)) + + + train_start_time = time.time() + if acorn is not None: np.random.seed(acorn) - train_start_time = time.time() progressive_learner.add_task( - X=train_x[ - task_ii * 5000 - + slot * num_points_per_task : task_ii * 5000 - + (slot + 1) * num_points_per_task - ], - y=train_y[ - task_ii * 5000 - + slot * num_points_per_task : task_ii * 5000 - + (slot + 1) * num_points_per_task - ], - num_transformers=1 if model == "dnn" else ntrees, - transformer_voter_decider_split=[0.67, 0.33, 0], - decider_kwargs={ - "classes": np.unique( - train_y[ - task_ii * 5000 - + slot * num_points_per_task : task_ii * 5000 - + (slot + 1) * num_points_per_task - ] - ) - }, - ) + X = train_x[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task], + y = train_y[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task], + num_transformers = 1 if model == "dnn" else ntrees, + transformer_voter_decider_split = [0.63, 0.37, 0], + decider_kwargs = {"classes" : 
np.unique(train_y[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task])} + ) train_end_time = time.time() + + single_learner = ProgressiveLearner(default_transformer_class = default_transformer_class, + default_transformer_kwargs = default_transformer_kwargs, + default_voter_class = default_voter_class, + default_voter_kwargs = default_voter_kwargs, + default_decider_class = default_decider_class) + + if acorn is not None: + np.random.seed(acorn) + single_learner.add_task( + X = train_x[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task], + y = train_y[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task], + num_transformers = 1 if model == "dnn" else (task_ii+1)*ntrees, + transformer_voter_decider_split = [0.67, 0.33, 0], + decider_kwargs = {"classes" : np.unique(train_y[task_ii*5000+slot*num_points_per_task:task_ii*5000+(slot+1)*num_points_per_task])} + ) + + time_info.append(train_end_time - train_start_time) + mem_info.append(get_size(progressive_learner)) train_times_across_tasks.append(train_end_time - train_start_time) single_task_inference_start_time = time.time() - llf_task = progressive_learner.predict( - X=test_x[task_ii * 1000 : (task_ii + 1) * 1000, :], - transformer_ids=[task_ii], - task_id=task_ii, - ) + single_task=single_learner.predict( + X = test_x[task_ii*1000:(task_ii+1)*1000,:], transformer_ids=[0], task_id=0 + ) single_task_inference_end_time = time.time() single_task_accuracies[task_ii] = np.mean( - llf_task == test_y[task_ii * 1000 : (task_ii + 1) * 1000] - ) - single_task_inference_times_across_tasks.append( - single_task_inference_end_time - single_task_inference_start_time - ) + single_task == test_y[task_ii*1000:(task_ii+1)*1000] + ) + single_task_inference_times_across_tasks.append(single_task_inference_end_time - single_task_inference_start_time) + + - for task_jj in range(task_ii + 1): + for task_jj in range(task_ii+1): multitask_inference_start_time = time.time() - llf_task = progressive_learner.predict( - X=test_x[task_jj * 1000 : (task_jj + 1) * 1000, :], task_id=task_jj - ) + llf_task=progressive_learner.predict( + X = test_x[task_jj*1000:(task_jj+1)*1000,:], task_id=task_jj + ) multitask_inference_end_time = time.time() shifts.append(shift) - tasks.append(task_jj + 1) - base_tasks.append(task_ii + 1) - accuracies_across_tasks.append( - np.mean(llf_task == test_y[task_jj * 1000 : (task_jj + 1) * 1000]) - ) - multitask_inference_times_across_tasks.append( - multitask_inference_end_time - multitask_inference_start_time - ) - - df["data_fold"] = shifts - df["task"] = tasks - df["base_task"] = base_tasks - df["accuracy"] = accuracies_across_tasks - df["multitask_inference_times"] = multitask_inference_times_across_tasks + tasks.append(task_jj+1) + base_tasks.append(task_ii+1) + accuracies_across_tasks.append(np.mean( + llf_task == test_y[task_jj*1000:(task_jj+1)*1000] + )) + multitask_inference_times_across_tasks.append(multitask_inference_end_time - multitask_inference_start_time) + + df['data_fold'] = shifts + df['task'] = tasks + df['base_task'] = base_tasks + df['accuracy'] = accuracies_across_tasks + df['multitask_inference_times'] = multitask_inference_times_across_tasks df_single_task = pd.DataFrame() - df_single_task["task"] = range(1, 11) - df_single_task["data_fold"] = shift - df_single_task["accuracy"] = single_task_accuracies - df_single_task[ - "single_task_inference_times" - ] = single_task_inference_times_across_tasks - df_single_task["train_times"] = 
train_times_across_tasks - - summary = (df, df_single_task) - file_to_save = ( - "result/result/" - + model - + str(ntrees) - + "_" - + str(shift) - + "_" - + str(slot) - + ".pickle" - ) - with open(file_to_save, "wb") as f: + df_single_task['task'] = range(1, 11) + df_single_task['data_fold'] = shift + df_single_task['accuracy'] = single_task_accuracies + df_single_task['single_task_inference_times'] = single_task_inference_times_across_tasks + df_single_task['train_times'] = train_times_across_tasks + + summary = (df,df_single_task) + file_to_save = 'result/result/'+model+str(ntrees)+'_'+str(shift)+'_SupervisedContrastiveLoss'+'.pickle' + with open(file_to_save, 'wb') as f: pickle.dump(summary, f) + '''file_to_save = 'result/time_res/'+model+str(ntrees)+'_'+str(shift)+'_'+str(slot)+'.pickle' + with open(file_to_save, 'wb') as f: + pickle.dump(time_info, f) + file_to_save = 'result/mem_res/'+model+str(ntrees)+'_'+str(shift)+'_'+str(slot)+'.pickle' + with open(file_to_save, 'wb') as f: + pickle.dump(mem_info, f)''' #%% def cross_val_data(data_x, data_y, num_points_per_task, total_task=10, shift=1): @@ -250,184 +220,93 @@ def cross_val_data(data_x, data_y, num_points_per_task, total_task=10, shift=1): y = data_y.copy() idx = [np.where(data_y == u)[0] for u in np.unique(data_y)] - batch_per_task = 5000 // num_points_per_task - sample_per_class = num_points_per_task // total_task - test_data_slot = 100 // batch_per_task + batch_per_task=5000//num_points_per_task + sample_per_class = num_points_per_task//total_task + test_data_slot=100//batch_per_task for task in range(total_task): for batch in range(batch_per_task): - for class_no in range(task * 10, (task + 1) * 10, 1): - indx = np.roll(idx[class_no], (shift - 1) * 100) - - if batch == 0 and class_no == 0 and task == 0: - train_x = x[ - indx[batch * sample_per_class : (batch + 1) * sample_per_class], - :, - ] - train_y = y[ - indx[batch * sample_per_class : (batch + 1) * sample_per_class] - ] - test_x = x[ - indx[ - batch * test_data_slot - + 500 : (batch + 1) * test_data_slot - + 500 - ], - :, - ] - test_y = y[ - indx[ - batch * test_data_slot - + 500 : (batch + 1) * test_data_slot - + 500 - ] - ] + for class_no in range(task*10,(task+1)*10,1): + indx = np.roll(idx[class_no],(shift-1)*100) + + if batch==0 and class_no==0 and task==0: + train_x = x[indx[batch*sample_per_class:(batch+1)*sample_per_class],:] + train_y = y[indx[batch*sample_per_class:(batch+1)*sample_per_class]] + test_x = x[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500],:] + test_y = y[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500]] else: - train_x = np.concatenate( - ( - train_x, - x[ - indx[ - batch - * sample_per_class : (batch + 1) - * sample_per_class - ], - :, - ], - ), - axis=0, - ) - train_y = np.concatenate( - ( - train_y, - y[ - indx[ - batch - * sample_per_class : (batch + 1) - * sample_per_class - ] - ], - ), - axis=0, - ) - test_x = np.concatenate( - ( - test_x, - x[ - indx[ - batch * test_data_slot - + 500 : (batch + 1) * test_data_slot - + 500 - ], - :, - ], - ), - axis=0, - ) - test_y = np.concatenate( - ( - test_y, - y[ - indx[ - batch * test_data_slot - + 500 : (batch + 1) * test_data_slot - + 500 - ] - ], - ), - axis=0, - ) + train_x = np.concatenate((train_x, x[indx[batch*sample_per_class:(batch+1)*sample_per_class],:]), axis=0) + train_y = np.concatenate((train_y, y[indx[batch*sample_per_class:(batch+1)*sample_per_class]]), axis=0) + test_x = np.concatenate((test_x, 
x[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500],:]), axis=0) + test_y = np.concatenate((test_y, y[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500]]), axis=0) return train_x, train_y, test_x, test_y - #%% -def run_parallel_exp( - data_x, data_y, n_trees, model, num_points_per_task, slot=0, shift=1 -): - train_x, train_y, test_x, test_y = cross_val_data( - data_x, data_y, num_points_per_task, shift=shift - ) +def run_parallel_exp(data_x, data_y, n_trees, model, num_points_per_task, slot=0, shift=1): + train_x, train_y, test_x, test_y = cross_val_data(data_x, data_y, num_points_per_task, shift=shift) if model == "dnn": - config = tf.ConfigProto() + config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - with tf.device("/gpu:" + str(shift % 4)): - LF_experiment( - train_x, - train_y, - test_x, - test_y, - n_trees, - shift, - slot, - model, - num_points_per_task, - acorn=12345, - ) + sess = tf.compat.v1.Session(config=config) + with tf.device('/gpu:'+str(shift % 4)): + LF_experiment(train_x, train_y, test_x, test_y, n_trees, shift, slot, model, num_points_per_task, acorn=12345) else: - LF_experiment( - train_x, - train_y, - test_x, - test_y, - n_trees, - shift, - slot, - model, - num_points_per_task, - acorn=12345, - ) - + LF_experiment(train_x, train_y, test_x, test_y, n_trees, shift, slot, model, num_points_per_task, acorn=12345) #%% ### MAIN HYPERPARAMS ### -model = "uf" -num_points_per_task = 500 +model = "dnn" +num_points_per_task = 500 # change from 5000 to 500 ######################## (X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data() data_x = np.concatenate([X_train, X_test]) if model == "uf": - data_x = data_x.reshape( - (data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3]) - ) + data_x = data_x.reshape((data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3])) data_y = np.concatenate([y_train, y_test]) data_y = data_y[:, 0] #%% if model == "uf": - slot_fold = range(10) - shift_fold = range(1, 7, 1) - n_trees = [10] - iterable = product(n_trees, shift_fold, slot_fold) - Parallel(n_jobs=-2, verbose=1)( + slot_fold = range(1) + shift_fold = range(1,7,1) + n_trees=[10] + iterable = product(n_trees,shift_fold,slot_fold) + Parallel(n_jobs=-2,verbose=1)( delayed(run_parallel_exp)( - data_x, data_y, ntree, model, num_points_per_task, slot=slot, shift=shift - ) - for ntree, shift, slot in iterable - ) + data_x, data_y, ntree, model, num_points_per_task, slot=slot, shift=shift + ) for ntree,shift,slot in iterable + ) elif model == "dnn": - slot_fold = range(10) - + slot_fold = range(10) #edit this default 10 is correct? 
+ + ''' + #parallel def perform_shift(shift_slot_tuple): shift, slot = shift_slot_tuple - return run_parallel_exp( - data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift - ) - + return run_parallel_exp(data_x, data_y, 0, model, num_points_per_task, slot=slot, shift=shift) print("Performing Stage 1 Shifts") stage_1_shifts = range(1, 5) - stage_1_iterable = product(stage_1_shifts, slot_fold) + stage_1_iterable = product(stage_1_shifts,slot_fold) with Pool(4) as p: p.map(perform_shift, stage_1_iterable) - print("Performing Stage 2 Shifts") stage_2_shifts = range(5, 7) - stage_2_iterable = product(stage_2_shifts, slot_fold) + stage_2_iterable = product(stage_2_shifts,slot_fold) with Pool(4) as p: p.map(perform_shift, stage_2_iterable) - -# %% + ''' + + #sequential + slot_fold = range(1) + shift_fold = [1,2,3,4,5,6] + n_trees=[0] + iterable = product(n_trees,shift_fold,slot_fold) + + for ntree,shift,slot in iterable: + run_parallel_exp( + data_x, data_y, ntree, model, num_points_per_task, slot=slot, shift=shift + ) \ No newline at end of file From bc9c723a4b9e260e997d9281ad925db72246f986 Mon Sep 17 00:00:00 2001 From: waleeattia Date: Wed, 8 Dec 2021 18:16:38 -0500 Subject: [PATCH 3/9] ading plotting script for comparison --- .../cifar_exp/plot_compare_two_algos.py | 299 ++++++++++++++++++ 1 file changed, 299 insertions(+) create mode 100644 benchmarks/cifar_exp/plot_compare_two_algos.py diff --git a/benchmarks/cifar_exp/plot_compare_two_algos.py b/benchmarks/cifar_exp/plot_compare_two_algos.py new file mode 100644 index 0000000000..33cf62aad3 --- /dev/null +++ b/benchmarks/cifar_exp/plot_compare_two_algos.py @@ -0,0 +1,299 @@ +#%% +import pickle +import matplotlib.pyplot as plt +from matplotlib import rcParams +rcParams.update({'figure.autolayout': True}) +import numpy as np +from itertools import product +import seaborn as sns + +### MAIN HYPERPARAMS ### +ntrees = 0 +shifts = 6 +task_num = 10 +model = "dnn" +######################## +algo1_name = "SupervisedContrastiveLoss" +algo2_name = "CategoricalCrossEntropy" +#%% +def unpickle(file): + with open(file, 'rb') as fo: + dict = pickle.load(fo, encoding='bytes') + return dict + +def get_fte_bte(err, single_err, ntrees): + bte = [[] for i in range(10)] + te = [[] for i in range(10)] + fte = [] + + for i in range(10): + for j in range(i,10): + #print(err[j][i],j,i) + bte[i].append(err[i][i]/err[j][i]) + te[i].append(single_err[i]/err[j][i]) + + for i in range(10): + #print(single_err[i],err[i][i]) + fte.append(single_err[i]/err[i][i]) + + + return fte,bte,te + +def calc_mean_bte(btes,task_num=10,reps=6): + mean_bte = [[] for i in range(task_num)] + + + for j in range(task_num): + tmp = 0 + for i in range(reps): + tmp += np.array(btes[i][j]) + + tmp=tmp/reps + mean_bte[j].extend(tmp) + + return mean_bte + +def calc_mean_te(tes,task_num=10,reps=6): + mean_te = [[] for i in range(task_num)] + + for j in range(task_num): + tmp = 0 + for i in range(reps): + tmp += np.array(tes[i][j]) + + tmp=tmp/reps + mean_te[j].extend(tmp) + + return mean_te + +def calc_mean_fte(ftes,task_num=10,reps=6): + fte = np.asarray(ftes) + + return list(np.mean(np.asarray(fte),axis=0)) + +def calc_mean_err(err,task_num=10,reps=6): + mean_err = [[] for i in range(task_num)] + + + for j in range(task_num): + tmp = 0 + for i in range(reps): + tmp += np.array(err[i][j]) + + tmp=tmp/reps + #print(tmp) + mean_err[j].extend([tmp]) + + return mean_err + +def calc_mean_multitask_time(multitask_time,task_num=10,reps=6): + mean_multitask_time = [[] for i in 
range(task_num)] + + + for j in range(task_num): + tmp = 0 + for i in range(reps): + tmp += np.array(multitask_time[i][j]) + + tmp=tmp/reps + #print(tmp) + mean_multitask_time[j].extend([tmp]) + + return mean_multitask_time + +#%% +reps = shifts + +btes = [[] for i in range(task_num)] +ftes = [[] for i in range(task_num)] +tes = [[] for i in range(task_num)] +err_ = [[] for i in range(task_num)] +btes2 = [[] for i in range(task_num)] +ftes2 = [[] for i in range(task_num)] +tes2 = [[] for i in range(task_num)] +err_2 = [[] for i in range(task_num)] + + +te_tmp = [[] for _ in range(reps)] +bte_tmp = [[] for _ in range(reps)] +fte_tmp = [[] for _ in range(reps)] +err_tmp = [[] for _ in range(reps)] +train_time_tmp = [[] for _ in range(reps)] +single_task_inference_time_tmp = [[] for _ in range(reps)] +multitask_inference_time_tmp = [[] for _ in range(reps)] +te_tmp2 = [[] for _ in range(reps)] +bte_tmp2 = [[] for _ in range(reps)] +fte_tmp2 = [[] for _ in range(reps)] +err_tmp2 = [[] for _ in range(reps)] +train_time_tmp2 = [[] for _ in range(reps)] +single_task_inference_time_tmp2 = [[] for _ in range(reps)] +multitask_inference_time_tmp2 = [[] for _ in range(reps)] + +count = 0 +for shift in range(shifts): + filename = 'result/result/'+model+str(ntrees)+'_'+str(shift+1)+'_'+algo1_name+'.pickle' + filename2 = 'result/result/'+model+str(ntrees)+'_'+str(shift+1)+'_'+algo2_name+'.pickle' + multitask_df, single_task_df = unpickle(filename) + multitask_df2, single_task_df2 = unpickle(filename2) + err = [[] for _ in range(10)] + multitask_inference_times = [[] for _ in range(10)] + err2 = [[] for _ in range(10)] + multitask_inference_times2 = [[] for _ in range(10)] + for ii in range(10): + err[ii].extend( + 1 - np.array( + multitask_df[multitask_df['base_task']==ii+1]['accuracy'] + ) + ) + err2[ii].extend( + 1 - np.array( + multitask_df2[multitask_df2['base_task']==ii+1]['accuracy'] + ) + ) + multitask_inference_times[ii].extend(np.array(multitask_df[multitask_df['base_task']==ii+1]['multitask_inference_times'])) + multitask_inference_times2[ii].extend(np.array(multitask_df2[multitask_df2['base_task']==ii+1]['multitask_inference_times'])) + single_err = 1 - np.array(single_task_df['accuracy']) + single_err2 = 1 - np.array(single_task_df2['accuracy']) + fte, bte, te = get_fte_bte(err,single_err,ntrees) + fte2, bte2, te2 = get_fte_bte(err2,single_err2,ntrees) + + err_ = [[] for i in range(task_num)] + for i in range(task_num): + for j in range(task_num-i): + #print(err[i+j][i]) + err_[i].append(err[i+j][i]) + err_2 = [[] for i in range(task_num)] + for i in range(task_num): + for j in range(task_num-i): + #print(err[i+j][i]) + err_2[i].append(err2[i+j][i]) + + train_time_tmp[count].extend(np.array(single_task_df['train_times'])) + single_task_inference_time_tmp[count].extend(np.array(single_task_df['single_task_inference_times'])) + multitask_inference_time_tmp[count].extend(multitask_inference_times) + te_tmp[count].extend(te) + bte_tmp[count].extend(bte) + fte_tmp[count].extend(fte) + err_tmp[count].extend(err_) + train_time_tmp2[count].extend(np.array(single_task_df2['train_times'])) + single_task_inference_time_tmp2[count].extend(np.array(single_task_df2['single_task_inference_times'])) + multitask_inference_time_tmp2[count].extend(multitask_inference_times2) + te_tmp2[count].extend(te2) + bte_tmp2[count].extend(bte2) + fte_tmp2[count].extend(fte2) + err_tmp2[count].extend(err_2) + count+=1 + +te = calc_mean_te(te_tmp,reps=reps) +bte = calc_mean_bte(bte_tmp,reps=reps) +fte = 
calc_mean_fte(fte_tmp,reps=reps) +error = calc_mean_err(err_tmp,reps=reps) +te2 = calc_mean_te(te_tmp2,reps=reps) +bte2 = calc_mean_bte(bte_tmp2,reps=reps) +fte2 = calc_mean_fte(fte_tmp2,reps=reps) +error2 = calc_mean_err(err_tmp2,reps=reps) + +train_time = np.mean(train_time_tmp, axis = 0) +single_task_inference_time = np.mean(single_task_inference_time_tmp, axis = 0) +multitask_inference_time = calc_mean_multitask_time(multitask_inference_time_tmp) +multitask_inference_time = [np.mean(multitask_inference_time[i]) for i in range(len(multitask_inference_time))] +train_time2 = np.mean(train_time_tmp2, axis = 0) +single_task_inference_time2 = np.mean(single_task_inference_time_tmp2, axis = 0) +multitask_inference_time2 = calc_mean_multitask_time(multitask_inference_time_tmp2) +multitask_inference_time2 = [np.mean(multitask_inference_time2[i]) for i in range(len(multitask_inference_time2))] + +#%% +sns.set() + +n_tasks=10 +clr = ["#e41a1c", "#a65628", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#CCCC00"] +#c = sns.color_palette(clr, n_colors=len(clr)) + +fontsize=22 +ticksize=20 + +fig, ax = plt.subplots(2,2, figsize=(16,11.5)) +fig.suptitle(algo1_name + ' - ' + algo2_name, fontsize=25) +difference = [] +zip_object = zip(fte2, fte) +for fte2_i, fte_i in zip_object: + difference.append(fte2_i-fte_i) +ax[0][0].plot(np.arange(1,n_tasks+1), difference, c='red', marker='.', markersize=14, linewidth=3) +ax[0][0].hlines(1, 1,n_tasks, colors='grey', linestyles='dashed',linewidth=1.5) +ax[0][0].tick_params(labelsize=ticksize) +ax[0][0].set_xlabel('Number of tasks seen', fontsize=fontsize) +ax[0][0].set_ylabel('FTE Difference', fontsize=fontsize) + + +for i in range(n_tasks): + + et = np.asarray(bte[i]) + et2 = np.asarray(bte2[i]) + ns = np.arange(i + 1, n_tasks + 1) + ax[0][1].plot(ns, et2-et, c='red', linewidth = 2.6) + +ax[0][1].set_xlabel('Number of tasks seen', fontsize=fontsize) +ax[0][1].set_ylabel('BTE Difference', fontsize=fontsize) +#ax[0][1].set_xticks(np.arange(1,10)) +ax[0][1].tick_params(labelsize=ticksize) +ax[0][1].hlines(1, 1,n_tasks, colors='grey', linestyles='dashed',linewidth=1.5) + + + +for i in range(n_tasks): + + et = np.asarray(te[i]) + et2 = np.asarray(te2[i]) + ns = np.arange(i + 1, n_tasks + 1) + ax[1][0].plot(ns, et2-et, c='red', linewidth = 2.6) + +ax[1][0].set_xlabel('Number of tasks seen', fontsize=fontsize) +ax[1][0].set_ylabel('TE Difference', fontsize=fontsize) +#ax[1][0].set_xticks(np.arange(1,10)) +ax[1][0].tick_params(labelsize=ticksize) +ax[1][0].hlines(1, 1,n_tasks, colors='grey', linestyles='dashed',linewidth=1.5) + + +for i in range(n_tasks): + et = np.asarray(error[i][0]) + et2 = np.asarray(error2[i][0]) + ns = np.arange(i + 1, n_tasks + 1) + + ax[1][1].plot(ns, 1-et2-(1-et) , c='red', linewidth = 2.6) + +#ax[1][1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=22) +ax[1][1].set_xlabel('Number of tasks seen', fontsize=fontsize) +ax[1][1].set_ylabel('Accuracy Difference', fontsize=fontsize) +#ax[1][1].set_yticks([.4,.6,.8,.9,1, 1.1,1.2]) +#ax[1][1].set_xticks(np.arange(1,10)) +#ax[1][1].set_ylim(0.89, 1.15) +ax[1][1].tick_params(labelsize=ticksize) + +plt.savefig('result/result/',dpi=300) +plt.close() + +ax = plt.subplot(111) + +# Hide the right and top spines +ax.spines['right'].set_visible(False) +ax.spines['top'].set_visible(False) + +# Only show ticks on the left and bottom spines +ax.yaxis.set_ticks_position('left') +ax.xaxis.set_ticks_position('bottom') + +#fig.suptitle('ntrees = '+str(ntrees),fontsize=25) +ax.plot(range(len(train_time)), 
train_time, linewidth=3, linestyle="solid", label = "Train Time")
+ax.plot(range(len(single_task_inference_time)), single_task_inference_time, linewidth=3, linestyle="solid", label = "Single Task Inference Time")
+ax.plot(range(len(multitask_inference_time)), multitask_inference_time, linewidth=3, linestyle="solid", label = "Multi-Task Inference Time")
+
+
+ax.set_xlabel('Number of Tasks Seen', fontsize=fontsize)
+ax.set_ylabel('Time (seconds)', fontsize=fontsize)
+ax.tick_params(labelsize=ticksize)
+ax.legend(fontsize=22)
+
+plt.tight_layout()
+# plt.savefig('result/result/',dpi=300)
+
+# %%
\ No newline at end of file

From 492124433b505467f4b128caf5193ffa26fefb76 Mon Sep 17 00:00:00 2001
From: waleeattia
Date: Wed, 8 Dec 2021 18:17:20 -0500
Subject: [PATCH 4/9] adding experimental results

---
 .../result/SupConLoss-CatCrossEntropy.png | Bin 0 -> 592518 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 benchmarks/cifar_exp/result/result/SupConLoss-CatCrossEntropy.png

diff --git a/benchmarks/cifar_exp/result/result/SupConLoss-CatCrossEntropy.png b/benchmarks/cifar_exp/result/result/SupConLoss-CatCrossEntropy.png
new file mode 100644
index 0000000000000000000000000000000000000000..88b5975730732542aaf509139415c28992910db2
GIT binary patch
literal 592518
[base85-encoded PNG payload omitted]
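For anyone regenerating a comparison like the figure added above, the result pickles written by fte_bte_exp.py (and read by plot_compare_two_algos.py) can be inspected directly. The sketch below is illustrative only and is not part of the patches; the shift value is an assumption, while the "dnn", ntrees=0, and "_SupervisedContrastiveLoss" pieces of the file name follow the naming used in patches 2 and 3.

import pickle

# One result file per fold, named
# 'result/result/' + model + str(ntrees) + '_' + str(shift) + '_SupervisedContrastiveLoss.pickle'
shift = 1  # any of the six folds run in patch 2 (assumed here)
path = "result/result/dnn0_" + str(shift) + "_SupervisedContrastiveLoss.pickle"

with open(path, "rb") as f:
    multitask_df, single_task_df = pickle.load(f)

print(multitask_df.head())                    # accuracy per (base_task, task) pair
print(single_task_df[["task", "accuracy"]])   # single-task baselines used for FTE/BTE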