diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..459f519 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +data/* +__pycache__/* +venv/* +logs/* +history/* +src/__pycache__/* +models/* +notebook/.ipynb_checkpoints +*.csv +*.history diff --git a/CDL.py b/CDL.py deleted file mode 100644 index 8103f13..0000000 --- a/CDL.py +++ /dev/null @@ -1,163 +0,0 @@ -import numpy as np -import logging -from keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout, Lambda, Add -from keras.layers.noise import GaussianNoise -from keras.initializers import RandomUniform, RandomNormal -from keras.models import Model -from keras.regularizers import l2 -from keras import optimizers -from keras import backend as K -from keras.engine.topology import Layer - -class CollaborativeDeepLearning: - def __init__(self, item_mat, hidden_layers): - ''' - hidden_layers = a list of three integer indicating the embedding dimension of autoencoder - item_mat = item feature matrix with shape (# of item, # of item features) - ''' - assert(len(hidden_layers)==3) - self.item_mat = item_mat - self.hidden_layers = hidden_layers - self.item_dim = hidden_layers[0] - self.embedding_dim = hidden_layers[-1] - - def pretrain(self, lamda_w=0.1, encoder_noise=0.1, dropout_rate=0.1, activation='sigmoid', batch_size=64, epochs=10): - ''' - layer-wise pretraining on item features (item_mat) - ''' - self.trained_encoders = [] - self.trained_decoders = [] - X_train = self.item_mat - for input_dim, hidden_dim in zip(self.hidden_layers[:-1], self.hidden_layers[1:]): - logging.info('Pretraining the layer: Input dim {} -> Output dim {}'.format(input_dim, hidden_dim)) - pretrain_input = Input(shape=(input_dim,)) - encoded = GaussianNoise(stddev=encoder_noise)(pretrain_input) - encoded = Dropout(dropout_rate)(encoded) - encoder = Dense(hidden_dim, activation=activation, kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(encoded) - decoder = Dense(input_dim, activation=activation, 
kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(encoder) - # autoencoder - ae = Model(inputs=pretrain_input, outputs=decoder) - # encoder - ae_encoder = Model(inputs=pretrain_input, outputs=encoder) - # decoder - encoded_input = Input(shape=(hidden_dim,)) - decoder_layer = ae.layers[-1] # the last layer - ae_decoder = Model(encoded_input, decoder_layer(encoded_input)) - - ae.compile(loss='mse', optimizer='rmsprop') - ae.fit(X_train, X_train, batch_size=batch_size, epochs=epochs, verbose=2) - - self.trained_encoders.append(ae_encoder) - self.trained_decoders.append(ae_decoder) - X_train = ae_encoder.predict(X_train) - - def fineture(self, train_mat, test_mat, lamda_u=0.1, lamda_v=0.1, lamda_n=0.1, lr=0.001, batch_size=64, epochs=10): - ''' - Fine-tuning with rating prediction - ''' - num_user = int( max(train_mat[:,0].max(), test_mat[:,0].max()) + 1 ) - num_item = int( max(train_mat[:,1].max(), test_mat[:,1].max()) + 1 ) - - # item autoencoder - itemfeat_InputLayer = Input(shape=(self.item_dim,), name='item_feat_input') - encoded = self.trained_encoders[0](itemfeat_InputLayer) - encoded = self.trained_encoders[1](encoded) - decoded = self.trained_decoders[1](encoded) - decoded = self.trained_decoders[0](decoded) - - # user embedding - user_InputLayer = Input(shape=(1,), dtype='int32', name='user_input') - user_EmbeddingLayer = Embedding(input_dim=num_user, output_dim=self.embedding_dim, input_length=1, name='user_embedding', embeddings_regularizer=l2(lamda_u), embeddings_initializer=RandomNormal(mean=0, stddev=1))(user_InputLayer) - user_EmbeddingLayer = Flatten(name='user_flatten')(user_EmbeddingLayer) - - # item embedding - item_InputLayer = Input(shape=(1,), dtype='int32', name='item_input') - item_OffsetVector = Embedding(input_dim=num_item, output_dim=self.embedding_dim, input_length=1, name='item_offset_vector', embeddings_regularizer=l2(lamda_v), embeddings_initializer=RandomNormal(mean=0, stddev=1))(item_InputLayer) - item_OffsetVector = 
Flatten(name='item_flatten')(item_OffsetVector) - item_EmbeddingLayer = Add()([encoded, item_OffsetVector]) - - # rating prediction - dotLayer = Dot(axes = -1, name='dot_layer')([user_EmbeddingLayer, item_EmbeddingLayer]) - - self.cdl_model = Model(inputs=[user_InputLayer, item_InputLayer, itemfeat_InputLayer], outputs=[dotLayer, decoded]) - self.cdl_model.compile(optimizer='rmsprop', loss=['mse', 'mse'], loss_weights=[1, lamda_n]) - - train_user, train_item, train_item_feat, train_label = self.matrix2input(train_mat) - test_user, test_item, test_item_feat, test_label = self.matrix2input(test_mat) - - model_history = self.cdl_model.fit([train_user, train_item, train_item_feat], [train_label, train_item_feat], epochs=epochs, batch_size=batch_size, validation_data=([test_user, test_item, test_item_feat], [test_label, test_item_feat])) - return model_history - - # v and theta - ''' - def lossLayer(args): - Vj, Thetaj = args - return 0.5 * K.mean(K.square(Vj - Thetaj), axis=1) - - - class lossLayer(Layer): - def __init__(self, **kwargs): - super(lossLayer, self).__init__(**kwargs) - #self.kernel_regularizer = l2(lamda_v) - - def call(self, inputs): - Vj, Thetaj = inputs - return 0.5 * K.mean(K.square(Vj - Thetaj), axis=1) - - def compute_output_shape(self, input_shape): - return (None, 1) - - def fe_loss(y_true, y_pred): - return y_pred - - fe_regLayer = lossLayer()([encoded, item_EmbeddingLayer]) - - - my_RMSprop = optimizers.RMSprop(lr=lr) - - self.cdl_model = Model(inputs=[user_InputLayer, item_InputLayer, itemfeat_InputLayer], outputs=[dotLayer, decoded, fe_regLayer]) - self.cdl_model.compile(optimizer='rmsprop', loss=['mse', 'mse', fe_loss], loss_weights=[1, lamda_n, lamda_v]) - - train_user, train_item, train_item_feat, train_label = self.matrix2input(train_mat) - test_user, test_item, test_item_feat, test_label = self.matrix2input(test_mat) - - model_history = self.cdl_model.fit([train_user, train_item, train_item_feat], [train_label, train_item_feat, 
train_label], epochs=epochs, batch_size=batch_size, validation_data=([test_user, test_item, test_item_feat], [test_label, test_item_feat, test_label])) - return model_history - ''' - - def matrix2input(self, rating_mat): - train_user = rating_mat[:, 0].reshape(-1, 1).astype(int) - train_item = rating_mat[:, 1].reshape(-1, 1).astype(int) - train_label = rating_mat[:, 2].reshape(-1, 1) - train_item_feat = [self.item_mat[train_item[x]][0] for x in range(train_item.shape[0])] - return train_user, train_item, np.array(train_item_feat), train_label - - def build(self): - # rating prediction - prediction_layer = Dot(axes = -1, name='prediction_layer')([user_EmbeddingLayer, encoded]) - self.model = Model(inputs=[user_InputLayer, itemfeat_InputLayer], outputs=[prediction_layer]) - - def getRMSE(self, test_mat): - test_user, test_item, test_item_feat, test_label = self.matrix2input(test_mat) - pred_out = self.cdl_model.predict([test_user, test_item, test_item_feat]) - # pred_out = self.cdl_model.predict([test_user, test_item, test_item_feat]) - return np.sqrt(np.mean(np.square(test_label.flatten() - pred_out[0].flatten()))) - -''' -from keras.engine.topology import Layer -class V_Theta_Layer(Layer): - def __init__(self, **kwargs): - super(MyLayer, self).__init__(**kwargs) - - def build(self, input_shape): - # Create a trainable weight variable for this layer. - self.kernel = self.add_weight(name='epsilon', - shape=(input_shape[1], ), - initializer=RandomNormal(mean=0., stddev=lamda_v), - regularizer=l2(lamda_v), - trainable=True) - super(MyLayer, self).build(input_shape) # Be sure to call this somewhere! 
- - def call(self, x): - return x + self.kernel -''' \ No newline at end of file diff --git a/README.md b/README.md index 716e252..9e57c2c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,12 @@ # Keras-CDL -keras implementation of Collaborative Deep Learning +keras implementation of Collaborative Deep Learning(Modified version) +## Tuning ``` python main.py ``` + +## Make prediction +``` +python make-prediction.py +``` diff --git a/config.json b/config.json new file mode 100644 index 0000000..998a1c3 --- /dev/null +++ b/config.json @@ -0,0 +1,6 @@ +{ + "users_path": "./data/users.csv", + "items_path": "./data/items.csv", + "train_path": "./data/train.csv", + "predict_path": "./data/test.csv" +} \ No newline at end of file diff --git a/main.py b/main.py deleted file mode 100644 index 89d849b..0000000 --- a/main.py +++ /dev/null @@ -1,20 +0,0 @@ -import logging -from utils import read_rating, read_feature -from CDL import CollaborativeDeepLearning - -def main(): - logging.info('reading data') - train_mat = read_rating('data/ml-1m/normalTrain.csv') - test_mat = read_rating('data/ml-1m/test.csv') - item_mat = read_feature('data/ml-1m/itemFeat.csv') - num_item_feat = item_mat.shape[1] - - model = CollaborativeDeepLearning(item_mat, [num_item_feat, 16, 8]) - model.pretrain(lamda_w=0.001, encoder_noise=0.3, epochs=10) - model_history = model.fineture(train_mat, test_mat, lamda_u=0.01, lamda_v=0.1, lamda_n=0.1, lr=0.01, epochs=3) - testing_rmse = model.getRMSE(test_mat) - print('Testing RMSE = {}'.format(testing_rmse)) - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') - main() \ No newline at end of file diff --git a/model.png b/model.png new file mode 100644 index 0000000..de00bbe Binary files /dev/null and b/model.png differ diff --git a/notebook/plot-loss-graph.ipynb b/notebook/plot-loss-graph.ipynb new file mode 100644 index 0000000..f8b3c78 --- /dev/null +++ b/notebook/plot-loss-graph.ipynb @@ -0,0 +1,127 @@ 
+{ + "cells": [ + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABHgAAALICAYAAAAE6EcMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3WmYndV5p/t71aya59JQGhEIMUmAEBCwjYPBxnYMSWxix84hifuQTpwc98mx27iPk7RPdzqku5O204ntJrYTHA8dB8cGz2AMxgODBcZmkEAIJFVJqlFVpZqkmtb5sLdABoE11N6v6n3v33Xp2nPtf/Hhqc2z13pWiDEiSZIkSZKkhask6QCSJEmSJEk6OTZ4JEmSJEmSFjgbPJIkSZIkSQucDR5JkiRJkqQFzgaPJEmSJEnSAmeDR5IkSZIkaYGzwSNJkiRJkrTA2eCRjlMI4Q9DCFtCCIdCCP+YdB5J0vwLIVSGED4VQtgVQhgNITwaQrgm6VySpMIIIXw2hLAvhHAghPB0COHfJJ1JOl42eKTjtxf4z8Cnkw4iSSqYMqALeA3QAHwI+GIIYVWCmSRJhfMXwKoYYz3wFuA/hxAuTDiTdFxs8EjHKcb4rzHGrwCDSWeRJBVGjHE8xvgfY4w7Y4xzMcavAc8BftiXpBSKMT4RYzx0+Gb+32kJRpKOmw0eSZKkXyCE0AGcATyRdBZJUmGEED4WQpgAtgH7gG8kHEk6LjZ4JEmSXkEIoRz4HHBrjHFb0nkkSYURY/wDoA54FfCvwKFXfoV0arHBI0mS9DJCCCXAPwFTwB8mHEeSVGAxxtkY4w+ATuD3k84jHY+ypANIkiSdikIIAfgU0AG8McY4nXAkSVLxlOEMHi0wruCRjlMIoSyEUAWUAqUhhKoQgs1SSUqfjwPrgV+JMU4mHUaSVBghhPYQwttDCLUhhNIQwuuBdwB3J51NOh4hxph0BmlBCSH8R+DPXnT3h2OM/7H4aSRJhRBCWAnsJDd/YeaIh34vxvi5REJJkgoihNAG3AZsILcIYhfwNzHGv080mHScbPBIkiRJkiQtcG7RkiRJkiRJWuBs8EiSJEmSJC1wNngkSZIkSZIWOBs8kiRJkiRJC9yCONq5tbU1rlq1KukYknTKe/jhhwdijG1J5zhR1ntJOnbWfEnKhmOt9wVr8IQQ1gH/fMRda4A/BT6Tv38VueNHr48xDr3Sz1q1ahVbtmwpTFBJSpEQwq6kM5wM670kHTtrviRlw7HW+4Jt0YoxPhVj3Bhj3AhcCEwAXwZuAu6OMZ4O3J2/LUmSJEmSpBNUrBk8VwI7Yoy7gGuBW/P33wpcV6QMkiRJkiRJqVSsBs/bgS/kr3fEGPflr/cAHUd7QQjhxhDClhDClv7+/mJklCRJkiRJWpAKPmQ5hFABvAX44IsfizHGEEI82utijLcAtwBs2rTpqM+RpBebnp6mu7ubgwcPJh2loKqqqujs7KS8vDzpKJKUiKzUe7DmS1JWav7J1vtinKJ1DfBIjLE3f7s3hLAkxrgvhLAE6CtCBkkZ0d3dTV1dHatWrSKEkHScgogxMjg4SHd3N6tXr046jiQlIgv1Hqz5kgTZqPnzUe+LsUXrHbywPQvgDuCG/PUbgNuLkEFSRhw8eJCWlpbUFn6AEAItLS2p/wZDkl5JFuo9WPMlCbJR8+ej3he0wRNCqAGuAv71iLtvBq4KIWwHXpe/LUnzJs2F/7As/I6S9ItkpRZm5feUpFeShVp4sr9jQbdoxRjHgZYX3TdI7lQtSZIkSZIkzYNinaIlSZkwPDzMxz72seN+3Rvf+EaGh4cLkEiSVAhZrfchhHUhhEeP+Hc
ghPDvQgjNIYS7Qgjb85dNSWeVpPmyUGq+DR5JmkcvV/xnZmZe8XXf+MY3aGxsLFQsSdI8y2q9jzE+FWPcGGPcCFwITABfBm4C7o4xng7cnb8tSamwUGp+MU7RkqTMuOmmm9ixYwcbN26kvLycqqoqmpqa2LZtG08//TTXXXcdXV1dHDx4kPe+973ceOONAKxatYotW7YwNjbGNddcw+WXX86PfvQjli1bxu23386iRYsS/s0kSUey3gO5sQs7Yoy7QgjXAlfk778VuBf4QEK5JGleLZSan+oGz+/+449Z0lDFn//quUlHkZSAD3/1CZ7ce2Bef+ZZS+v5s185+2Ufv/nmm3n88cd59NFHuffee3nTm97E448//vxRh5/+9Kdpbm5mcnKSiy66iF//9V+npeXnRpWxfft2vvCFL/D3f//3XH/99XzpS1/iXe9617z+HmnzhYd287fffYZ7338F5aUuTpWyxnqfmLfzwmm5HTHGffnrPUDH0V4QQrgRuBFgxYoVx/2GfaMH+Y3/9QD/7nWnc+3GZcefWNKCZ81/ean+FHxgcprnBsaTjiEpwzZv3vx84Qf4m7/5GzZs2MAll1xCV1cX27dvf8lrVq9ezcaNGwG48MIL2blzZ7HiLlhzMbJneJLBsamko0jKqKzV+xBCBfAW4F9e/FiMMQLxaK+LMd4SY9wUY9zU1tZ23O9bXVHGcwPj9B7w2HhJyTlVa36qV/C01FbY4JEy7JW68MVSU1Pz/PV7772X73znO9x///1UV1dzxRVXcPDgSz+gVlZWPn+9tLSUycnJomRdyFprc//NBsYOsbihKuE0korNep+Ia4BHYoy9+du9IYQlMcZ9IYQlQF8h3rSmopSyksDQxHQhfrykBcCa//JSvYKntbbSb3MlFVVdXR2jo6NHfWxkZISmpiaqq6vZtm0bDzzwQJHTpdfhBk//2KGEk0jKCus97+CF7VkAdwA35K/fANxeiDcNIdBYXcHwhJ/xJRXPQqn5KV/BU8n+iSlm5yKlJSHpOJIyoKWlhcsuu4xzzjmHRYsW0dHxwgiCN7zhDXziE59g/fr1rFu3jksuuSTBpOnSdngFz6gNHknFkeV6H0KoAa4Cfu+Iu28GvhhCeDewC7i+UO/fVF3O0LgreCQVz0Kp+alu8LTWVhAj7B+foq2u8he/QJLmwec///mj3l9ZWck3v/nNoz52eA9ua2srjz/++PP3v+9975v3fGnUWlcBwICrNiUVUVbrfYxxHGh50X2D5E7VKrim6gqGJ633koprIdT81G/RAhgc9xtdSUqz6ooyqitKGXCLliSlXkN1OcPO4JGkl0h1g6elJveNrnN4JCn9WmsrbfBIUgY0VZcz5AweSXqJdDd4jjhVRZKUbq21FdZ7ScqApuoKhiamyZ3GLkk6LNUNntZaZzJIUla01lYyMGq9l6S0a6yuYGpmjsnp2aSjSNIpJdUNnoZF5ZSVBAb9RleSUq+1zi1akpQFjdXlAM7hkaQXSXWDJ4RAS22FM3gkKQNaayvZPzHFzOxc0lEkSQXUlG/wOIdHkn5eqhs8AC01fqMrqXiGh4f52Mc+dkKv/chHPsLExMQ8J8qOttoKYoT9fuCXVATW++Q0VufGMLiCR1KxLJSan/oGT2tdJQPjftiXVBwLpfinUevhwfrO4ZFUBNb75DTlGzyu4JFULAul5pcV5V0S1FpTwbP9Y0nHkJQRN910Ezt27GDjxo1cddVVtLe388UvfpFDhw7xq7/6q3z4wx9mfHyc66+/nu7ubmZnZ/mTP/kTent72bt3L6997WtpbW3lnnvuSfpXWVgGnmFd7z3AEldtSioK631yXtii5QoeScWxUGp+6hs8zuCRMuybN0HPY/P7MxefC9fc/LIP33zzzTz++OM8+uij3Hnnndx222089NBDxBh5y1vewn333Ud/fz9Lly7l61//OgAjIyM0NDTw13/919xzzz20trbOb+YCCyGsA/75iLvWAH8
KfCZ//ypgJ3B9jHGoICGe/iZrfvAh6vikDR4pi6z3mdKQb/CMuIJHyiZr/stK/RatltpKJqdnGT80k3QUSRlz5513cuedd3L++edzwQUXsG3bNrZv3865557LXXfdxQc+8AG+//3v09DQkHTUkxJjfCrGuDHGuBG4EJgAvgzcBNwdYzwduDt/uzDqlwKwOOy3wSOp6LJS708VlWWlVFeUuoJHUiJO5Zqf+hU8h2cyDI5NUVOZ+l9X0pFeoQtfDDFGPvjBD/J7v/d7L3nskUce4Rvf+AYf+tCHuPLKK/nTP/3TBBIWxJXAjhjjrhDCtcAV+ftvBe4FPlCQd61fBsCKsiEGXLUpZY/1PnOaqiucwSNllTX/ZWVgBU9uCNvAuN/oSiq8uro6RkdHAXj961/Ppz/9acbGcnPA9uzZQ19fH3v37qW6upp3vetdvP/97+eRRx55yWsXsLcDX8hf74gx7stf7wE6Cvau+RU8aysPMDBqvZdUeNb7ZDVWl3uKlqSiWSg1P/VLWlprDp+q4gd+SYXX0tLCZZddxjnnnMM111zDb/7mb3LppZcCUFtby2c/+1meeeYZ3v/+91NSUkJ5eTkf//jHAbjxxht5wxvewNKlSxfk0M0QQgXwFuCDL34sxhhDCPFlXncjcCPAihUrTuzNaxcDgZUVIzzpFi1JRZDlen8qyDV4XMEjqTgWSs0PMR718/YpZdOmTXHLli0n9Np9I5Nc+hff5S9+7VzesfkE/8dB0oKxdetW1q9fn3SMojja7xpCeDjGuCmJPPktWe+JMV6dv/0UcEWMcV8IYQlwb4xx3Sv9jJOp9/y30/leySZuLvsDvvneV53Yz5C0YGSp3sOpV/Pnw8nU/Pd8/hG27j3Ad993xfyGknRKylLNP5l6n/otWs01uS1ag36jK0mF9g5e2J4FcAdwQ/76DcDtBX33+qV0RIcsS1IWNFWXO4NHkl4k3Vu0nrydyvIa6qrKHLopSQUUQqgBrgKOnDZ3M/DFEMK7gV3A9QUNUb+MlpGn2D8+xexcpLQkFPTtJEnJaaquYGRymrm5SIn1XpKAtDd47vvvULeY1tr3+I2ulCExRkJI94e9U217bYxxHGh50X2D5E7VKo76pdRPfZ/ZucjQxNTzpyhKSq8s1Hs49Wr+qaCxuoK5CAcOTtNYXZF0HElFkIWaf7L1Pt1btOqXwug+WmsrGHQFj5QJVVVVDA4OpvrDcIyRwcFBqqqqko5yaqlfSuXMAao4ZFNfyoAs1Huw5r+cxkXlAJ6kJWVEFmr+fNT7dK/gqVsM3VtoWVLJswNjSaeRVASdnZ10d3fT39+fdJSCqqqqorOzM+kYp5b8UemLw34GRqdgccJ5JBVUVuo9WPOPpqkm1+AZmphiFTUJp5FUaFmp+Sdb71Pe4FkKEwO0V8NDruCRMqG8vJzVq1cnHUNJyDd4lgQHLUtZYL3PtsPbslzBI2WDNf/YpHuLVl3u69sVFWMMTUwxMzuXcCBJUsHULwNgMTZ4JCntmvINHk/SkqQXpLvBk/82t7N0iBhhyA6/JKVX3RIgV/P7bfBIUqo5g0eSXirdDZ78h/32MATA4Lgf+CUptSqqYVETK8tHcjN4JEmpVb+onBBg2BU8kvS8TDR4mucGAfzAL0lpV7eUztIht2hJUsqVlgQaFpW7Ql+SjpDuBk91M5RW0DAzALiCR5JSr34pHWHQBo8kZUBTdYUzeCTpCOlu8IQAdYupOZQ7Sm3Ak7QkKd3ql9I6Z4NHkrKgYVG5M3gk6QjpbvAA1C2lfKKH8tLgB35JSrv6ZdTN7OfA2ARzczHpNJKkAmqqLmd40i9wJemw9Dd46pcQRntoqalkYNQGjySlWv70xJa4nwG35UpSqjVVVzA07goeSTos/Q2euiVwYB/tdRX02eCRpHSrzw3X72A/vSPWfElKs8bqCk/RkqQjZKPBMz3OqrpZeg8cTDqNJKmQ6pcBsCTsZ9/IZMJhJEmF1FRdzvjULFM
zc0lHkaRTQjYaPMBpVaM2eCQp7fJbtBaH/fRY8yUp1RqrywGcwyNJeelv8OSX668sG2FoYpqD07MJB5IkFUxlPbGilmUl+9k3YoNHktKssboCwJO0JCkv/Q2e/AqeJWXDAPQ7h0eS0isEQv1SVpYP02uDR5LS58A++NvN8NhtNOUbPEPjruCRJMhQg6ct7gdwyb4kpV3dEpaVDruCR5LSqKoeBp6C4d3Pb9EacgWPJAFZaPBUVENVA00zAwDO4ZGktKtfRlsctKEvSWlUUQMVdTDW98IMHk/SkiQgCw0egLol1EzlGjw9fqMrSelWv5TGmUH6RsaIMSadRpI032rbYazn+S1aw5Ou4JEkyFCDp3yih8qyEvqcwSNJ6da4ghJmaZge5MDkTNJpJEnzrbYDxvqoriilorSEIVfwSBJQ4AZPCKExhHBbCGFbCGFrCOHSEEJzCOGuEML2/GVTITMAUL+UMNpDR32VK3gkKe0alwPQGfrZd2Ay4TCSpHlX1wFjvYQQaKwuZ3jcFTySBIVfwfNR4FsxxjOBDcBW4Cbg7hjj6cDd+duFVbcYRntYUlfuDB5JSrvGlUC+wWNTX5LSp7YDRnsBaKqucAWPJOUVrMETQmgAXg18CiDGOBVjHAauBW7NP+1W4LpCZXhe3RKIs5xWM+kWLUlKu4ZOADrDgKs2JSmNajtgahSmxmmoLncGjyTlFXIFz2qgH/iHEMJPQgifDCHUAB0xxn355/QAHQXMkJM/Kn115Sg9IwcduilJaVZWSaxdzLISGzySlEq1+f99GOujqbrcU7QkKa+QDZ4y4ALg4zHG84FxXrQdK+Y6LUfttoQQbgwhbAkhbOnv7z+5JPW5Bs+KsmEmp2cZPeTQTUlKs9C4gjVlgzZ4JCmNnm/w9Oa3aLmCR5KgsA2ebqA7xvhg/vZt5Bo+vSGEJQD5y76jvTjGeEuMcVOMcVNbW9vJJalbCsDikiEAev3AL0np1rgiP2TZei9JqVP3QoOnsbqC4YkpV+hLEgVs8MQYe4CuEMK6/F1XAk8CdwA35O+7Abi9UBmeV9MGoYTWuf0A9B5wDo8kpVrjctrmBugbHks6iSRpvh2xRauxupzp2cj41GyymSTpFFBW4J//R8DnQggVwLPA75BrKn0xhPBuYBdwfYEzQGkZ1HZQP51bLNTjN7qSlG6NKyhjhtkDPUknkSTNt+oWCCUw2kNTfTkAwxNT1FYW+n9tJOnUVtAqGGN8FNh0lIeuLOT7HlVDJ9UHc8cpelS6JKVc4woAGg7tY/zQDDV+6Jek9CgphZr23BatxRUADE9M09mUcC5JSlghZ/CcWho6KT3QRX1VmQ0eSUq7xpVA/qh0a74kzasQQmMI4bYQwrYQwtYQwqUhhOYQwl0hhO35y8K2W2rbnx+yDDDkSVqSlKUGz3IY6WZxXYUNHklKu4ZOAJYFj0qXpAL4KPCtGOOZwAZgK7nTcu+OMZ4O3M2LTs+dd3WL8w2e3Bat/eM2eCQpOw2exhUwO8UZdQfpcciyJKVb+SJmq9voDP02eCRpHoUQGoBXA58CiDFOxRiHgWuBW/NPuxW4rqBBatthrI/2+ioA+kf9fC9J2Wnw5L/NPb1imD5X8EhS6oX8Uelu0ZKkebUa6Af+IYTwkxDCJ0MINUBHjHFf/jk9QMfRXhxCuDGEsCWEsKW/v//EU9R2wFgf9RWBqvISV+hLEplq8CwHYHX5IH2jh5idiwkHkiQVUknTClaWDrBvZDLpKJKUJmXABcDHY4znA+O8aDtWjDECR/2wHWO8Jca4Kca4qa2t7cRT1C6GOEuYHKKjvopeV+hLUoYaPI25Bs/SMMjsXGRw3D8CkpRqjStYzAC9wxNJJ5GkNOkGumOMD+Zv30au4dMbQlgCkL/sK2iK2vbc5VgvHXVVruCRJLLU4KlqgMp62uZyf2v67PJLUro1rqCCGQ6N7PvFz5UkHZMYYw/QFUJYl7/rSuBJ4A7ghvx9NwC3FzRIbX4
H2FgP7fWV9DmDR5Iy1OABaFhO41QPgEM3JSnt8kell4x0JRxEklLnj4DPhRB+BmwE/gtwM3BVCGE78Lr87cKpO9zg6ctv0TpIbmeYJGVXWdIBiqpxOdVDuQ/6vaM2eCQp1fKz1+oP7mPs0Ay1ldn6kydJhRJjfBTYdJSHrixaiOdX8PTSUV/JxNQsY4dmqKsqL1oESTrVZG4FT9loNyFAryt4JCnd8rPXOsMAuwbHEw4jSZpXFTVQUQejvXTkj0p30LKkrMtYg6eTcHCE1bWz7LXBI0npVlHDTFULnaGf3YMOWpak1Klth7Fe2usON3j8fC8p27LV4Ml/m7uhbpTuIT/sS1LahcbldIZ+dtrgkaT0qe3Iz+CpBGzwSFK2GjwNKwBYt2iEPcOTCYeRJBVaafNKVpQOsnu/W7QkKXXqOvKnaLlFS5Igcw2eTgDWlO9n3/BBZuectC9Jqda4kmX0sXtgNOkkkqT5ll/BU1tZRm1lmSt4JGVetho8tR1QWsGyMMDMXPSPgCTNkxBCYwjhthDCthDC1hDCpSGE5hDCXSGE7fnLpqIHa15DOTNMDnYX/a0lSQVW2w6HDsDUBO31lfR5Sq6kjMtWg6ekBOqX0TrbB+A2LUmaPx8FvhVjPBPYAGwFbgLujjGeDtydv11czWsAqBrdxaGZ2aK/vSSpgGoX5y7HellcX+UWLUmZl60GD0BDJ/WHegDYM2SDR5JOVgihAXg18CmAGONUjHEYuBa4Nf+0W4Hrih4u3+BZGXrotuZLUrrUduQux/roqK9ydb6kzMteg6dxBVXjewE8SUuS5sdqoB/4hxDCT0IInwwh1AAdMcZ9+ef0AB1He3EI4cYQwpYQwpb+/v75TVa/jLnSClaGXnYNOmhZklKl7nCDpze3RevAIWJ0xqak7Mpeg6dhOWGsh8U1wS1akjQ/yoALgI/HGM8HxnnRdqyY+8R91E/dMcZbYoybYoyb2tra5jdZSQlzDatYFXrZ5VHpkpQutS80eDrqqpianWN4YjrZTJKUoAw2eDqByHn14y7Xl6T50Q10xxgfzN++jVzDpzeEsAQgf9mXRLjS1jWsLrHBI0mpU90CoSTX4Dl8VLqDliVlWPYaPI3LATiresQZPJI0D2KMPUBXCGFd/q4rgSeBO4Ab8vfdANyeQDxC82m5LVoDY0m8vSSpUEpKoaY93+CpBHDQsqRMK0s6QNE15Bo8p1UMs2d4khgjIYSEQ0nSgvdHwOdCCBXAs8DvkPsS4YshhHcDu4DrE0nWsoYqDjE22A1cnEgESVKB1LbD6BEreBy0LCnDstfgqV8GQGfJAIdm5hgYm6KtrjLhUJK0sMUYHwU2HeWhK4ud5SXyJ2mVj+xidi5SWmJTX5JSo7YDxnqf/zzfZ4NHUoZlb4tWeRXULaFjthfwJC1JSr18g2dZ3EePH/wlKV3qOmCsj6ryUhqry92iJSnTstfgAWheQ+PBLgBP0pKktKvvZK6knFWhh10DHpUuSalS2wHjfTA3R0ddlVu0JGVaRhs8q6ka3QXgSVqSlHalZczWr8gNWt7vqk1JSpXaxTA3A5P7aa+vpHfUFTySsiujDZ41lIz3sbhqxpO0JCkDylpP86h0SUqj2vbcZf6odGfwSMqyzDZ4AC6sG3aLliRlQGhZwyqPSpek9KntyF2O9tBRX0nf6CHm5mKymSQpIdls8DStBuCc6v0OWZakLGheQw2TDA/sSzqJJGk+1S/JXY7uo6O+itm5yOD4VLKZJCkh2WzwNOcaPGvL+tgzNEmMdvklKdXyKzfD0LPWfElKk7qlQIDhLtrrqgActCwps7LZ4KlqgOpWlscexqdmGZmcTjqRJKmQ8g2exTN76XcApySlR1kF1C2BkS466isB6Bu1wSMpm7LZ4AFoXkPb9B7Ak7QkKfUalhNDKStLennWo9IlKV0al8PwbjrqD6/gsZEvKZsy3eCpm+gCbPBIUuqVVTBb18mq0MtzNngkKV0alsNIF21
1uRU8btGSlFWZbvBUjO+lkikHLUtSBpS2nsaa0MNOGzySlC6Ny2FkD+Uh0lpb4QoeSZmV4QZPbtDymZX7XcEjSRkQWteypmQfz/Z7VLokpUrDcpibhtEe2uuq6HMFj6SMynCDJzdw84LaIXbvdwWPJKVe6xnUMMlof1fSSSRJ86lxRe4yP2i51yHLkjIq8w2es6oGbPBIUha0rQOgavgZZuc8Kl2SUuNwg2e4i476KrdoScqs7DZ4FjVBVQNrSvvYvX+COT/sS1K6teYaPCtjN3uH3ZorSanR0Jm7HNlNe30VA2OHmJmdSzaTJCUguw2eEKB5DUtm9zE1M0ffqJ1+SUq12nZmKupZG/Z4kpYkpUlFDVS35FfwVBIjDIxNJZ1Kkoouuw0egOY1NB3KzWLYNeiHfUlKtRCILWewNuy1wSNJaZM/Kr2jrgqAHgctS8qgbDd4mlZTNb6HcmacwyNJGVDWcSanl7iCR5JSp3E5DHexpDHX4NnnVlxJGZTtBk/zGkKcY3lJP102eCQp9ULbOlrDCL19PUlHkSTNp4YVMNLF8qZFAH55KymTMt/ggdxR6bv8IyBJ6Zc/SSv0P51wEEnSvGpcDtMT1M+N0lRdboNHUibZ4AHOrd7vHwFJyoLWMwCoH9vB1IwnrEhSajQsz12O7GZ5c7Wf7SVlUrYbPLXtUFnPGaX72D3oHwFJSr3GFcyWVLIm7PXDvySlSWO+wTPcxfLmarqHnMEjKXuy3eAJAdrXs3JmF4PjU4wdmkk6kSSpkEpKOdS4hrVhDzsdtCxJ6fH8Cp4uVjRX0z00wexcTDaTJBVZths8AO3raZ3cAUQHLUtSBpS1n8na4ElakpQqi5qgohaGcw2e6dnoUemSMscGT/tZVEyN0MYwu9ymJUmpV7H4TDpLBtjdtz/pKJKk+RJCbhXPSBfLm6oBHMEgKXMK2uAJIewMITwWQng0hLAlf19zCOGuEML2/GVTITP8Qm1nArCupNsVPJKUBa1nUEJkqndb0kkkSfOpcTkM72ZFc67B0zXkZ3tJ2VKMFTyvjTFujDFuyt++Cbg7xng6cHf+dnLazwJgQ8Vedu13ub4kpV7+qPSSge3E6HwGSUqN/AqeJY1VlJYEv7yVlDlJbNG6Frg1f/1W4LoEMrygtg2qW9lQuY/d+522L0mp17KWOUpYPL2L/rFDSaeRJM2XxuUwOUT5zDhLGqo8LVFS5hS6wROBO0MID4cQbszf1xFj3Je/3gN0HO3Zvr7XAAAgAElEQVSFIYQbQwhbQghb+vv7C5uyfT2nhy67/JKUBWWVHKpbztqwh237RpNOI0maLw0vHJW+ornaBo+kzCl0g+fyGOMFwDXAe0IIrz7ywZhbG3/U9fExxltijJtijJva2toKm7L9LJZO7WLP0JjHKUpSBpR2nMUZYQ9P9djgkaTUaFyRu8wfld7l6nxJGVPQBk+McU/+sg/4MrAZ6A0hLAHIX/YVMsMxaV9P5dwE7XMD7BvxD4EkpV3FsvNYU7KPZ/YWeIWoJKl4nl/Bs5vlzdUMjB1iYmom2UySVEQFa/CEEGpCCHWHrwNXA48DdwA35J92A3B7oTIcs/yg5TNCl8cpSlIWdJxDKXMc3PN40kkkSfOltgNKK3JHpR8+SctVPJIypJAreDqAH4QQfgo8BHw9xvgt4GbgqhDCduB1+dvJas8flR662eVeXUlKv8XnAlA3tJXp2bmEw0iS5kVJCdQve34GD+AcHkmZUlaoHxxjfBbYcJT7B4ErC/W+J6SqgVi/jPXD3TzWP5Z0GklSoTWuZLqsltNndrJzYJzTO+qSTiRJC0YIYScwCswCMzHGTSGEZuCfgVXATuD6GONQ0cM1Ln9+Bg/Y4JGULUkck35KCu3rObt8D9v7bPBIUuqVlDDdup6zSnax1UHLknQiXhtj3Bhj3JS/fRNwd4zxdODu/O3ia1gBw100VZdTW1nmKbmSMsUGz2Ht61k5182zPSNJJ5EkFUFl50b
Why627R1OOookpcG1wK3567cC1yWSonEFjPUQZqfobFpkg0dSptjgOaz9LMriNGUHdjptX5IyoHTpedSGSQa7tycdRZIWmgjcGUJ4OIRwY/6+jhjjvvz1HnLzOF8ihHBjCGFLCGFLf38BTjJszJ+kNdLNiuZqt2hJyhQbPIe1rwdgXehiR994wmEkSQXXcQ4AZX2epCVJx+nyGOMFwDXAe0IIrz7ywRhjJNcEeokY4y0xxk0xxk1tbW3zn6xxRe5yeBcrmqvpGpogF0eS0s8Gz2FtZxJDKWeV7OKZfucxSFLqta9njlLaJ7czMjmddBpJWjBijHvyl33Al4HNQG8IYQlA/rIvkXCtZ+Qu+59ieXM1B6fn6B87lEgUSSo2GzyHlS+CtjPZUPIs23sdtCxJqVe+iImGNZwVdvGUg5Yl6ZiEEGpCCHWHrwNXA48DdwA35J92A3B7IgFr2mBRM/Rtff4kLefwSMoKGzxHCMsuYGPpczzT6wd9ScqC0iXnsr5kN0/1HEg6iiQtFB3AD0IIPwUeAr4eY/wWcDNwVQhhO/C6/O3iCyE3eiG/ggc8Kl1SdtjgOdLS86mPo4z37kg6iSSpCKo6N9IZBtjZvSfpKJK0IMQYn40xbsj/OzvG+Of5+wdjjFfGGE+PMb4uxrg/sZBt66B/K52NVQDsHpxMLIokFZMNniMtuwCAppEnODQzm3AYSVo4Qgg7QwiPhRAeDSFsyd/XHEK4K4SwPX/ZlHTOFwuLc4OWD3X/NOEkkqR507YeDo5QdbCfjvpKuoZcwSMpG2zwHKn9bGZLyjk37GDngH8IJOk4vTbGuDHGuCl/+ybg7hjj6cDd+dunlsXnAlAztJXZOU9ZkaRUaD8zd9m/zaPSJWWKDZ4jlVVwqOUszgvPsb3POTySdJKuBW7NX78VuC7BLEdX285kZRtnxOd4bsAB+5KUCm0vNHiWN1c7ZFlSZtjgeZGKFZs4p+Q5nul14KYkHYcI3BlCeDiEcGP+vo4Y47789R5ygzlPOTMd53FOeI4n9lr3JSkVjjhJa3lTNT0HDjp+QVIm2OB5kbLOC6gLkxzYsy3pKJK0kFweY7wAuAZ4Twjh1Uc+GGOM5JpALxFCuDGEsCWEsKW/v78IUX9e9erNnB728EzXvl/8ZEnSqe+Ik7RWNFcTI3Ttd9CypPSzwfNiS3ODlhf1PZpwEElaOGKMe/KXfcCXgc1AbwhhCUD+su9lXntLjHFTjHFTW1tbsSI/r3T5JkpC5NCuLUV/b0lSgeRP0jqjvRaAp3ocvyAp/WzwvFjbOqZKqmgf3crM7FzSaSTplBdCqAkh1B2+DlwNPA7cAdyQf9oNwO3JJPwFll0IQN3gT8ktNJIkLXj5k7TOqB2jrCTwxN6RpBNJUsHZ4HmxklJGGs/inLDDifuSdGw6gB+EEH4KPAR8Pcb4LeBm4KoQwnbgdfnbp55FTYxUr2TdzFP0HDiYdBpJ0nzIn6RVuf9p1rbX8rhz1iRlQFnSAU5Fccn5nD34Ge7rGWJNW23ScSTplBZjfBbYcJT7B4Eri5/o+E0vuYCN4/fws+4RljQsSjqOJOlkPX+S1lOcvfSX+N7TfcQYCSEkm0uSCsgVPEfRsHYzVWGanh0/TTqKJKkI6k67hPYwzO6d25OOIkmaD0ecpHXOsnoGxqboGz2UdCpJKigbPEdRueIiAOLuhxJOIkkqhsqVmwGYtu5LUjo8f5LWNs5e2gDgHB5JqXdMDZ4QwntDCPUh51MhhEdCCFcXOlximtdwoKyZtqGfOHBTUuZkruYDdJzDdCinftCVm5KyI/X1vm0d9G9j/eLcyIUn9jiHR1K6HesKnt+NMR4gdzJKE/BbnKrDMudDCOxv2cR5c0+yb8SBm5IyJ1s1H6CsgoG69ayd2srI5HTSaSSpWNJd7/MnadVND7KqpZrHXcEjKeWOtcFzeBrZG4F/ijE+ccR9qVS6+pfoDAM8/dSTSUeRpGLLXM0HmFlyAeeG59j
aPZh0FEkqlnTX+/bDg5a3cvayBp7wJC1JKXesDZ6HQwh3kiv+3w4h1AFzhYuVvPZzfhmA8e33JZxEkoouczUfoGHtpVSFafY9/XDSUSSpWNJd73/uJK16uocmGZlwlaak9DrWBs+7gZuAi2KME0A58DsFS3UKqFx6DqOhhup9DyYdRZKKLXM1H6B+7SUAzHT9OOEkklQ06a73R5yk9fyg5X1u05KUXsfa4LkUeCrGOBxCeBfwISDd1bGklO7a81g1/lMHLUvKmuzVfIDGlRwobaSu/9Gkk0hSsaS73v/cSVr1gIOWJaXbsTZ4Pg5MhBA2AP8PsAP4TMFSnSIOLr2Y1exl756upKNIUjFlsuYTAoPNF3D29OP0HnDAvqRMSH+9z5+k1VpTweL6Ko9Kl5Rqx9rgmYm5ZSzXAn8bY/w7oK5wsU4NdeuuAKDnse8mG0SSiiuTNR+g/LRXs7yknyeeeCzpKJJUDOmv9/mTtBjt4eyl9Q5alpRqx9rgGQ0hfJDc0YlfDyGUkNujm2rLz76EyVjB3M4fJh1FkoopkzUfoGPDVQAc2HZPwkkkqSjSX+8Xn5u73PcoZy+tZ0f/GJNTs8lmkqQCOdYGz28Ah4DfjTH2AJ3AfytYqlNEZeUinq5YT+v+R5KOIknFlMmaD1DecRYHShqo67k/6SiSVAzpr/dLN0JJOXQ9yFlLG5iLsLXHVTyS0umYGjz5gv85oCGE8GbgYIwxXftzX8ZAy4WsnN7B3MRw0lEkqSiyXPMpKaGn6ULOPPgzxg56lK6kdMtEvS9fBEs2QNdDLwxadpuWpJQ6pgZPCOF64CHgbcD1wIMhhLcWMtipomTl5ZQQ6Xvi3qSjSFJRZLnmA4TVr2JZGGDrkz9LOookFVRm6v3yi2HPw3TWl9GwqJwnHbQsKaWOdYvW/wtcFGO8Icb4fwCbgT8pXKxTx7LzXsNErGT08W8lHUWSiiWzNR9g6carAdj/pHN4JKVeNur98s0wc5DQ+5iDliWl2rE2eEpijH1H3B48jtcuaKcvbeHhknNo3Pu9pKNIUrFktuYD1Cw7m+HQQPWeHyUdRZIKLRv1fvnm3GV+m9a2faNMz84lm0mSCuBYC/i3QgjfDiH8dgjht4GvA98oXKxTRwiB/sWvpm16L7P9zyQdR5KKIbM1H4AQ2NNwIWsnHmV6xpNWJKVaNup9/VJoWA5dD3JuZyNTs3Ns2zeadCpJmnfHOmT5/cAtwHn5f7fEGD9QyGCnksbzrgFg75Y7Ek4iSYWX9ZoPMLfqcpaEQZ556vGko0hSwWSq3i/fDF0PcdGqJgAe2rk/4UCSNP+OeQlmjPFLMcY/zv/7ciFDnWou2HA+z84tYeapO5OOIklFkeWaD7D4vKsAGHjsroSTSFJhZabeL78YDuxhCYN0Ni3ix8/Z4JGUPmWv9GAIYRSIR3sIiDHG+oKkOsU0Vlfw/ZrNXD38TZiezB23KEkpY81/QdvqcxkITVTsvg94X9JxJGleZbLePz+H50E2rzqN7z3dT4yREEKyuSRpHr3iCp4YY12Msf4o/+pSWfhfwcyaK6lkitFtnqoiKZ2s+UcIgd3Nl3PW+ENMTE4knUaS5lUm633HOVBendumtbqZwfEpnh0YTzqVJM2r9E3JL5DVF17NZKyg/9H0zZ2TJL1U5dlvpC5MsvWBbycdRZJ0skrLYdmF0PUgF61qBnCblqTUscFzjM5dtZiHw9nUdbmCR5KyYO0lb+ZQLOfgE19POookaT4s3wz7fsZpDdBSU+GgZUmpY4PnGJWWBPa1XU7bVDdxcEfScSRJBVZZXc/TNRewcuA+4txc0nEkSSdr+cUQZwn7HmXTqiZ+bINHUsrY4DkO1ee+GYC+h76UcBJJUjFMnXY1nfSyY+vDSUeRJJ2szotyl/ltWl37J9k3MplsJkmaRzZ4jsP5523gp3NrCFtvTzqKJKkIVl366wD0/vgrCSeRJJ206mZoPQO6HmLz6twcnoecwyMpRWz
wHIeljYt4oPJy2g88DsO7k44jSSqwlqWr2VF2Gk3d3006iiRpPizfDF0PctbiOmoqSt2mJSlVbPAcpwNr3gTA7BN3JJxEklQMA0t/mTOntzLQuyfpKJKkk7X8EpgcomxwGxesbOLHzw0lnUiS5o0NnuO07qzzeHxuFQd/6hweScqC1guvpSREdvzoy0lHkSSdrLVX5i6338lFq5p5qneU4YmpZDNJ0jyxwXOcLl3TwjdmN1PT9wiM+G2uJKXdmnMvo4dWFj3t/DVJWvDql0LHubD9Li5alZvDs2Wnq3gkpUPBGzwhhNIQwk9CCF/L314dQngwhPBMCOGfQwgVhc4wn9rqKnmy8bW5G1u/mmwYSVLBhZISdiy+hrMmtnBgcF/ScSRJJ+uMq2H3A5zfBuWlwTk8klKjGCt43gtsPeL2XwL/I8a4FhgC3l2EDPNq5Rnn8VRcwdyTnqoiSVnQfOm7KAtzPHvPZ5KOIkk6WadfDXGWqt3f47zORh6ywSMpJQra4AkhdAJvAj6Zvx2AXwZuyz/lVuC6QmYohEtPa+FrM5sJux+AA3uTjiNJKrAzz7uY7WEVdU//a9JRJEknq/MiWNT0/Byex7pHmJiaSTqVJJ20Qq/g+Qjw74G5/O0WYDjGeLiCdgPLjvbCEMKNIYQtIYQt/f39BY55fC5e3cJX536JQISffiHpOJKkAgshsLvzzZw2tY3R7m1Jx5EknYySUlj7Oth+F5euaWJmLvJj5/BISoGCNXhCCG8G+mKMD5/I62OMt8QYN8UYN7W1tc1zupPTVFNB9eIzeLLiPPjJZyHGpCNJkgpsyWXvYi4Gdt93a9JRJEkn6/SrYWKAiyt3Ul4a+NEzA0knkqSTVsgVPJcBbwkh7AT+N7mtWR8FGkMIZfnndAIL8iiqXzqthX+cvBz2Pwu7fph0HElSga1fdyaPlJ5Dy7NfsbEvSQvd2tcBgarnvsv5K5r4gQ0eSSlQsAZPjPGDMcbOGOMq4O3Ad2OM7wTuAd6af9oNwII8d/bVZ7Rxx/RFTJfVwiP/lHQcSVKBhRDoXfkWFs/sZXTHA0nHkSSdjOrm3Cye7Xdy+dpWntx3gKHxqaRTSdJJKcYpWi/2AeCPQwjPkJvJ86kEMpy0y9a20tLYyH2Vr4Ynb4eDI0lHkiQV2MpXvZ2DsZy++xbkny5J0pHOuBr2PsJrls0RI9z/7GDSiSTppBSlwRNjvDfG+Ob89WdjjJtjjGtjjG+LMR4qRob5VloS+M2LV/DR/ZfCzCQ8dtsvfpEkaUE7e/Vy7i57Fcu6vgoHDyQdR5ISEUIoDSH8JITwtfzt1SGEB0MIz4QQ/jmEUJF0xmNy+tUAnD2xhZqKUn7oNi1JC1wSK3hS422bOnkynEbvotPgJ27TkqS0CyHQe/pvUhUPMvmIpyhKyqz3AluPuP2XwP+IMa4FhoB3J5LqeC0+D2oXU/bMnVyypsUGj6QFzwbPSWivq+L1Zy/hHw++Gvb+BPY8knQkSVKBbbjkl3lsbhVTD3zSYcuSMieE0Am8Cfhk/nYgd5jK4eXstwLXJZPuOIUAp18FO77L5Wvq2Tk4wZ7hyaRTSdIJs8Fzkt558Qo+O3kZ02U1cP/fJR1HklRg569o5o7ya2g48DR0PZh0HEkqto8A/x6Yy99uAYZjjDP5293Aspd7cQjhxhDClhDClv7+/sImPRbn/BocOsDVIVfPXcUjaSGzwXOSLj2thbbWNr5Z8Xp44ssw0p10JElSAZWUBGbP+nVG4yJmHvxk0nEkqWhCCG8G+mKMD5/oz4gx3hJj3BRj3NTW1jaP6U7Q6iugaTVLn/kCrbUV/MgGj6QFzAbPSQohN2z5L/e/hgjw4CeSjiRJiUjN0M1j8LoNq/nS7Kso2foVGPfUFUmZcRnwlhDCTuB/k9ua9VGgMYRQln9OJ7AnmXgnoKQENv0OYdeP+LXOUX64Y5Do9ltJC5QNnnnwaxd00l/awRONV8DDt8Kh0aQjSVIS0jF
08xhsXt3MV8vfQMncNDz86aTjSFJRxBg/GGPsjDGuAt4OfDfG+E7gHuCt+afdANyeUMQTs/GdUFrBW7mL/tFDbO8bSzqRJJ0QGzzzoLmmgtefs5i/GLoSDh2ARzxRS1K2pGro5jEoKy3htLM3cV88n/jAx2FqPOlIkpSkDwB/HEJ4htxMnk8lnOf41LTCWdexdu9XWcRB5/BIWrBs8MyTd1y0nB8eXMVA8wXwwMdhdjrpSJJUTCc8dPOUG7h5jK45ZwkfmbqWMDEIj3wm6TiSVFQxxntjjG/OX382xrg5xrg2xvi2GOOhpPMdt02/S8nUKL9d/zA/fMatt5IWJhs88+SSNS2sbKnmk/FaGNkNj34+6UiSVBQnO3TzlBu4eYx+aW0Lz1SexTPVG+GHfwMzC+//ZyRJeSsugbb1vLP0bu7fMcDk1GzSiSTpuNngmSclJYHfuGg5n9i3loMd58P3/qsf9iVlRfqGbh6DyrJS3njuEm4eexOM7oWf/u+kI0mSTlQIsOl36Zzcxprp7Xz1Z3uTTiRJx80Gzzx664WdlJWU8KXG34ED3fDwPyYdSZIKLrVDN4/BtRuX8Z2psxhuPAd+8D9gduYXv0iSdGra8BvE8mp+v/ZePv/g7qTTSNJxs8Ezj9rrqnjd+g7+avsS5lZeDvf9dwdvSsqyhT108xhsXt3M4vpFfK7ibTD0HDz2xaQjSZJOVFUD4dy3cvXsD9jVtZsn9o4knUiSjosNnnn2tk2d7J+Y5pG1fwjjffDQLUlHkqSiSd3QzV+gtCTwKxuW8NE9a5np2ADf/XOYPph0LEnSibrkDyibO8jvlH/HVTySFhwbPPPs1We00VRdzq3di+H0q+EHH4GJ/UnHkiQVyLUblzE1G7h35f+V25770P9KOpIk6US1r4cz3sDvVtzFt37yLGOH3HoraeGwwTPPyktLeOO5S7jryR4mXvOncGgU7vnzpGNJkgrk7KX1nNZWw993Lcs19u/7Kxv7krSQXfbvqJ0d4Y2z3+WORx22LGnhsMFTANduXMbB6Tnu7G+Gi94NWz4NPY8nHUuSVAAhBK7duIwHn9tP38UfhKlR+P5fJR1LknSiVlxC7NzMH1R+ky888CwxxqQTSdIxscFTAJtWNrGscRFfeXQPXPFBqGqEb34A/OMgSal03cZlhAC37qiGjb+Zm7+2/7mkY0mSTkQIhMvey5K5Xlb13sXPuh22LGlhsMFTACUlgV/ZsJTvbx9gcK4GfvlDsOsH8ORXko4mSSqAFS3VvOHsxfzT/bsYv+wDUFphY1+SFrJ1b2S2eS2/X/41PvfAzqTTSNIxscFTINedv5TZucg3HtsHF/42dJwL3/oPMDmUdDRJUgH829ecxoGDM3z+yZnc6s3t34ZtX0s6liTpRJSUUHr5ezkr7KTn0W/z5N4DSSeSpF/IBk+BnLm4nnUddXzl0b1QUgpv+Wju2PSvvy/paJKkAtiwvJFL17TwyR88y6FN/yd0nJNbxXNoLOlokqQTcd5vMFfTznsqvsr7/uWnTM3MJZ1Ikl6RDZ4Cuu78ZTy8a4jnBsZh2YXwmg/A47fBY7clHU2SVAC/f8Vp9B44xO0/7YM3/RUc2APfuznpWJKkE1FWScll7+Xi+BhNvT/k7+55JulEkvSKbPAU0K9dsIySALc93JW74/I/hs6L4Gt/DCPdyYaTJM27V53eytlL6/nEfTuY67wYzv8tuP9jnqQoSQvVRf8GGlbwl3W38bF7nubxPQ5clnTqssFTQB31VVyxrp0vPbyH2bkIpWXwa7fA3Az86+/B7EzSESVJ8yiEwL99zWk82z/Of/32U8TXfRgWNcEdfwRzs0nHkyQdr/IquPJP6Dy0nXcsetCtWpJOaTZ4CuxtF3bSc+Ag39/en7ujeQ28+a9zp2p958+SDSdJmndvOncJ77x4BZ/43g7+8r5+4htuhr2P5I5OlyQtPOe8FZZs4IOVt/FczyD/87vbk04kSUdlg6fArlz
fQXNNBf+y5YgtWRveDptvhPv/1nk8kpQyJSWB/3TtObzrklyT5+aus4lrr4K7/xMM7046niTpeJWUwFX/iUUTe/mvyx/g77//LEPjU0mnkqSXsMFTYBVlJVy3cRl3Pdn7838IXv9fYMUvwe1/CD2PJRdQkjTvDjd5fuuSlfyv7z/H/ev/Q+6Br/3fEGOy4SRJx2/Na2DtVbx55PNUTY/w2Qd2JZ1Ikl7CBk8RvG1TJ1Ozc9z+6J4X7iwth7f9IyxqhM//Bgz5R0KS0iSEwJ/+ylksa1zEf39wknjln8Az34EHPp50NEnSibjq/6N0eoy/bPs2t96/i4PTzlaTdGqxwVME65fUc+6yBj5z/y4OHJx+4YG6Dnjnv8DUONz6KzCy5+V/iCRpwSkvLeHfvmYNj+we5v7WX4cz3wx3fgh2/iDpaJKk49VxFpz/Lq4eu53W8e0//+WtJJ0CbPAUyftev46uoQlu+PRDjB7Z5Fl8LvzWv8LEfvjMW2CsL7mQkqR597ZNy2mrq+Tv7n0Wrvs4NK+Gf/ltOLA36WiSpOP1ug/DokY+Uv0PfOq+Z5ibc9utpFOHDZ4iec0ZbfzPd1zAY90j/Pb/z959h7dZXQ8c/76SLNmW957xjO3sZbKBJAQIo2GGUfYqZRRKS1u66KTjV9pCKRQCFMKGsMoMEEhC9nJ2Yid2vPcesrbe3x83iRMyyHAi2zmf59FjIr3vqyvx+Prq6NxzXlhLl3O/FunJ41QmT0cNvHABtJT6b6BCCCF6VWCAkTvOzGB5cTMbGrxw9avg6oa3bgS3w9/DE0IIcSyCo9DO/xN53iLGt3zAkp2N/h6REELsIwGeU2jW8ASeuHYMGyvbuPe1ggMfTJsEN7wH3c3w3EyoWu+fQQohhOh1101IIyI4gCcXFUNcHlz2H6haC/+7B3w+fw9PCCHEsRh5Nb6Ms3ko4E3mL1rr79EIIcQ+EuA5xS4YkchDs/JYXNTIqt3NBz44aCLc9gWYrfDiRVD4sX8GKYQQoldZLSZumZzBwh0N7KzvhKGXwDm/ga1vw6JH/D08IYQQx0LTMFz8TwINXi6ueYyt1e3+HpEQQgAS4PGLGyalERtq4V9f7jr4wZjBcPtCiBsCb1wHyx6TlrpCCDEA3DApDbPJwLwVZeqOqQ/A2Bth6aOw4RW/jk0IIcQxis7CM/VBLjSu4fP3XsDtlWxMIYT/SYDHDwIDjNx5ViYrSppZW9Zy8AEhcXDzxzDsUlj4G5XC73Ge+oEKIYToNVFWM5eOTuLdgmrau92gaXDRPyBzOnx4P+xa6O8hCiGEOAaBZ/2Q1pBsrm/8Jz9/aSEujwR5hBD+JQEeP7luQhoxIRYeX3iILB4AczBc+QJM+zlsfBVeugRsTad2kEIIIXrVTZPTsbu9vLWuUt1hDICrXoLYIarocrXUXxNCiH7DZCbyhpeIMjq4bPdvuffVtRLkEUL4lQR4/CTIrLJ4lhU3sb78EFk8oL7dnfYQXPlfqNkAz86Ahh2ndqBCCCF6zbCkcMZnRDFvZRneva11A8Pg+rfBGg2vXgXNJX4doxBCiGMQPwzTxY8yxbiNobue5u5X1+P0eP09KiHEaUoCPH503cRBRFvN/OTtzYfeqrXX8Cvg5k/A44Dnz4MdH526QQohhOhVt0xOp6rVzpc76nvuDE2A698DdHj5Muis89v4hBBCHKMx18Ooa7nf9B72oi/56dub/T0iIcRpSgI8fhRsNvH4NWNwuLzMeXolP3prI01dh6m1kzIO7vgKojLhzevgg/vAZTu1AxZCCHHCzh0aT1J4IC8sLzvwgZhs+O58tR33lSvA3uaX8QkhhDhGmgYX/R0tNpdnrc+wYuM2PtxU4+9RCSFOQxLg8bOpg2NY+OOzuXtaFh9uquGWF9b2pO1/U3iKaqM+5YdQ8BI8fSaULj21AxZCCHFCTEYDN09JZ+XuZn7+7uYDU/lTxsE1r0BjEbx+Lbjt/huoEEKIo2e2wpx5BOH
gtZDH+Ov7q6nvcPh7VEKI04wEePqAYLOJn87K49E5o9hS3d5TfPNQTGY493dw04fgdcO8i1U7danZIIQQ/cZtUzO5Z3oWr6+p5Nq5q2jY/0NA1gy4fC5UrIS3bwWf1HIQQoh+IS4P7YrnyfaV8rjvzzw8fzW6fpgvboUQ4iSQAE8fMntUEuPTo/jbZ0WqhVEux/wAACAASURBVO6RZJwJ966BGb+G3YvhyQmw6E/gcZ2SsQohhDh+RoPGT87P48nvjmVHbSez/72c4oaungOGXw4X/g2KPoHPfum/gQohhDg2eReiXfEcY7Rd3FT2EG+tPEzHXCGEOAkkwNOHaJrGb2YPpa3bxT8X7vz2EwKC4KwH4QcFMOwyWPJXeG4G1G09+YMVQghxwi4amcg7d03G49O5Zu5KCus6eh4cfwdMvAdW/wdWP+O/QQohhDg2wy6DS//DROMOkj67nfKGIzRTEUKIXiQBnj5mWFI4350wiJdXlVNU13l0J4XGwxXPwtWvqs4rc6fBssfA5zupYxVCCHHihiaF8eadEzEZDFwzdxVbq9t7HjzvD5B7ISx4CIo+9d8ghRBCHBPD6GvomPl3ztQ20fjsHFx2aY4ihDj5JMDTB/343FxCA03c/8YGOh3fslVrf0MuhrtXQ+4sWPgbePlS6JAK/kII0ddlxYbw1p2TsJpNXP/8aura99TkMRjhiucgYSS8eT2sf9Gv4xRCCHH0Iqbextaxv2esaz3VT14Mzq5vP0kIIU6ABHj6oEirmX9dM4bihi7ueqUAl+cYMnGs0XDVyzD7CahaC/+ZDMsfB+dRZgMJIYTwi0HRwbx823icbh8/emsjvr0dFc1WuPF/kHE2fHg/fPoz8Hr8O1ghhBBHZfjs+/lfxsOkdm6k5ZmLwd7m7yEJIQYwCfD0UWflxPLny0ewrLiJh97dfGwV+DUNxt4Idy6FpDHwxcPw2AhY8n/g6Pj284UQQvhFZmwIv5s9jBUlzcxdurvngaAI+O5bMPFuWP20ytBsr/LfQIUQQhy1i67/IX8Pf4jQ5s04/3sx2Jr9PSQhxAAlAZ4+bE5+Kg/MzOHdgmp+/u4W7K5jbJUbkw03vAe3fwmpE2HRI/D4KJXR4+o+OYMWQghxQubkp3DhiAQe/ayIzVX7fdNrNMGsP8Ol/4HqApWhufVd/w1UCCHEUTGbDFx/6308YPgpNBbhe3YGNBT6e1hCiAHopAV4NE0L1DRtjaZpmzRN26Zp2u/23J+hadpqTdOKNU17U9M088kaw0Bw3znZ3D0tizfWVjL738vYUXscGTgp+fDdN+CORZA8VmX0PD4KFv1ZFWUWQgjRZ2iaxp8vG0lcqIW7Ximgus1+4AGjvwvfXwrR2fD2LTD/Fmiv9s9ghRBCHJXkiCCuuOZWrnH+kvaOdnzPzYRdX/h7WEKIAeZkZvA4gRm6ro8CRgOzNE2bCPwV+Keu69lAK3DbSRxDv6dpGj+dlcfLt42nze7mkieXs2Br7fFdLHksXP8O3PIpJI6CJX+Bfw6Dd26HxqLeHbgQQojjFh4cwNM3jKPD4ebauauo+WaQJzoLbv0Mpv8SCj+Gf58BS/8OHqd/BiyEEOJbTc+N45ZrruIq3yPsdEbhe/Uq9JVPwrGUYhBCiCM4aQEeXdlbKj5gz00HZgBv77l/HnDpyRrDQHLm4FgW3H8mQxPDeHD+ZsqaTqDVYtpkuP5t+EEBjP+ear371ER4905o2f3t5wshhDjpRqZE8PJtE2i1ubjmUEEeYwCc/VO4dw1kTYcvfw/PnA11W/wzYCGEEN9q9qgkXvzh5fwp4TG+8I5F++wXuN6+U8onCCF6xUmtwaNpmlHTtI1AA/AFUAK06bq+t/1HFZB8mHO/p2naOk3T1jU2Np7MYfYb0SEWnrxuLEaDxg9e34DTc4w1eQ66YJaq53D/Jph0D2x/X30L/NGPoLO+dwYthBDiuI1OjeDl21WQ5+q5K6lqPcQHgMh0uOZVVYTZ3gL
PzlC11nwn+DdCCCHESZEcEcQLd06nfObT/Mt7BaZtb+GZOwOaiv09NCFEP3dSAzy6rnt1XR8NpADjgbxjOHeuruv5uq7nx8bGnrQx9jfJEUE8OmcUW6rb+eunvbStyhoD5/1RBXrG3gQF8+Bfo+HzX0NzSe88hxBCiOOyN8jT3u3m6mdWUdF8mG95c86Hu1aqn188DP+dBfXbTu1ghRBCHBWjQeN7Zw9m7I3/xx3en2NrrsI392zY9p6/hyaE6MdOSRctXdfbgEXAJCBC0zTTnodSAKkMeYzOHRrPzZPT+e/yUl5aWXZsLdSPJDQBLv4H3LMGci+Alf+GJ8aqDwmrnoayZdDd0jvPJYQQ4qiNTo3gtTsmYnN5uOqZleyq7zz0gdZouOpluGwutJTAM2fBF78BZ9ehjxdCCOFXUwfH8P3b7uAK31/Z7kmG+TfD4r9IXR4hxHHRei048M0La1os4NZ1vU3TtCDgc1SB5ZuAd3Rdf0PTtKeBzbquP3Wka+Xn5+vr1q07KePsr5weL3e+vJ7FRY1cNCKRP10+gvCggN59ko5a2PwGbHgVmnf13G8JU7fAMIgbAsMug+xzISCwd59fCHHMNE1br+t6vr/Hcbxkvj+yHbUdXPfcalq7XUzLieXa8YNICA+ksK6T4oYuzh0azxnpUerg7hb44tew4RUIjoEp98EZt4PZ6t8XIYToNTLnDxybKtu49fnl/N7wLBf5FrEw/Epej7gTo9HA3dOzGZ0a4e8hCiH86Gjn+5MZ4BmJKqJsRGUKvaXr+u81TcsE3gCigA3A9bquH7Hth0z+h+bz6cxduptHPysiITyQZ2/MZ0hiWO8/ka6rdur126BhG3TUgKMDHO1QuQq6m8EcCrmzYMh3IHumfIAQwk9ksT/w1bU7eHV1OW+uraSh88A/nzEhFhb+6Cwigs09d1auhcV/gpKvwBqr2qyPmAPxw0HTTvHohRC9Seb8gWVHbQc/eWsDt3Q+wxWej1lgOY+HPbfR6vDxs1l53DY1A03mbSFOS34P8PQmmfyPrKCilbtfKcDu9jLv1vGnNsLv9UDZ17D1XdWq194CpiCV2ROaqLZ9ma2q24vBBC4bODvUz5gcSJ0AKflgCT11YxZiAPPHYl/TtEDga8ACmIC3dV3/jaZpGaiAfjSwHrhB13XXka4l8/3R83h9LN3VhMPtJTchFJvTy2VPLWf26CT+cdXog0+oWA3L/gnFX4DPA7F5MP0XMGS2BHqE6Kdkzh+gdB0WPQJf/w1X7iU84PweHxe2M3NIPI/OGXlgEF8IcVqQAM9pprKlm+ueW01zl5Pnbz6DiZnRp34QXg+UL1eBnuZi6KxVN7cdvG7QvRAQDIHhYLJAazmgg2aAlPGQcx4MPh/ih8mHDSGOk58W+xpg1XW9S9O0AGAZcD/wI+Dd/bbkbtJ1/T9HupbM9yfm758X8cRXxbxw8xlMz4s79EG2ZtU1ce3zKitz8Plw4d8gMu3UDlYIccJkzh/glv8Lvvg1emgiS1O/zx2bsokJDebduycTHyalEYQ4nUiA5zRU1+7guudWUdVq5+XbJjA+I8rfQzqQrh8YuHF0QNVaKF+hvlGu3aTuD0mArOmQfiaExkNgpAoKBQSpmzkETPLNhRCH4u90fU3TglGL/buAj4EEXdc9mqZNAn6r6/r5Rzpf5vsT4/R4ufhfy+hyevj8gbMIDTxCbTavB1Y/DYv+BLoPRl4F+bdC0iGyf4QQfZLM+aeB8pXw+S+hej32yDzubrqMsKHn8fi1Y/09MiHEKSQBntNUc5eTOU+vpLXbxfv3TCEtuh/VwumoVYGekkWwexHYWw9/rDUWwpIhNhfG3ghpUyTrRwj8t9jXNM2ISsnPBp4E/gas0nU9e8/jqcCnuq4PP9J1ZL4/cRsqWrniPyu4cEQiT1w75tvrNbRVwpK/wJZ3wGOHpDEw+joYfgUE97EvCoQQB+iPc76mad8DvgcwaNCgceXl5ads3P2WrsO
2d2Hhb6Gtgq2+dIKm3k3W9JukyYkQpwkJ8JzGSptsXPrkcmJDLbx792TCjvQNbl/l80JziarpY29TBZ09drXdy9EBHdWq2HPVWnC0QdxQGHm1qv0Tna1q/xj21P0xGPz9aoQ4ZfrAt7kRwHvAr4EXZbHvH08tLub/FhTxw5mD+eHMnKM7yd4Gm9+E9fPU1i1DAGROg7AklUUZEqe20yaNkSxKIfqI/jjn70/W+MfI48S1/hWqFvyTTL0SPTgG7YzbYMoPwRzs79EJIU6io53vTadiMOLUyoix8vT147jh+dXc/UoBD5w7mOzYUMKD+1Ggx2CE2KP4UOLqhq3vwJq5sPA3hz7GGquKOQ+aCOEpqh6Q16XuTxylgkFCiF6h63qbpmmLgElAhKZpJl3XPUAKUH2Yc+YCc0Et9k/ZYAewu87OoqTBxmMLd5ERY+WS0cnfflJQBEy4E8Z/D+q2qGBP8Zfqv/cG2UEV0k+bBGNvgryLVBF9IcRp6XjmfHECTBbME26jKOgCfvXGy/wldgWDlvwVtr0PVzwHiSP9PUIhhJ9JBs8A9ubaCh56dwt7/xenRgXx2NWjGZc2QFPubU0q66e5GGyNqkuMzwOtZVCxClpLD32eNU61dh9zPaRNPvRWr2/WDxKij/JTwc1YwL1noR8EfA78FbgJeGe/gpubdV1/6kjXkvm+9zg9Xm54bg0bq9p4YGYO03JjyUsIpdvlZWt1OyWNNi4ZnYTVcpTf9diaoGKlqpu240Nor1TdEvNvhfF3QFDkyX1BQoiDyJx/etJ1nRueX8PmqjaWXQlhC+6D7mY452GYdK9krwsxAMkWLQFAdZudwtoOihu6eG1NBQ0dTp6+YRxn58T6e2inXlcDdLf0tGzvqIbazVBTAIWfgKsTojIhJnfPCboKFHXUQledOsdshQBrzx9OQ4DarpA+BQZNgoi0Q6fIdrdAw3a1xSzjbNneIE4aPy32RwLzACNgAN7Sdf33mqZlolrmRgEbgOt1XXce6Voy3/euFpuL2+atZUNFGwDhQQF0Otz49vzpv2FiGn+49Ig7KA7N54Vdn8OaZ6HkSzCHqiDPhO+r4vhCiFNC5vzTV3FDJ7MeW8oFIxKZMchIzppfMax9CZsMw9iaczejpl7MsOTwb6/DJoToFyTAIw7S2Onkpv+uYVdDJ/+8ejQXj0zy95D6DpcNtv8PNr+lvgHZKzhaFXMOjVcfaFw2dWPP742rCyrXqgDQXoERqlYFmjrO2anaxe8VFKW61QyZrQJKIfGn5puWpl1Q8hVknKVqFX2Tzws7P4OKFRCaBBGD1C08RX0zLwuEfsHf9RhOlMz3J0ddu4OluxpZV9ZKQnggo1Mj+Hx7HW+ureSDe6cyPDn8BC6+Bb5+VM2h6CrQnTRGzW8BQWAKVNsGMs6WeUSIXiZz/untkY+38+xSlaEeYjFyT/gqruv6L2G+drb40vkg6DLiJl7DbWfnYjDI/CtEfyYBHnFI7XY3t89by7ryVn43exg3Tkr395D6P11XW8Oq10NHlSr+bGtCBYE0CAiGuDyIGwa6Fza9DoUfqzpAAEazCvrs/eCjGcFkUR+KLCEqYBQUqepjBIaDJUw9R+0mqN8K1hhV+DT1DIgfDjE56liPC1p2Q+1G2PAKlC3tGXPKGTDiKnU93QdtFVDwkhq/ZlTj3J8pUAV7YnMhdggkj1XFVwOCTs576uxSwaiqNap+Uva50iXiKMliXxytdrubGY8uJj3Gyvw7J5344r9xJxR9AjUb1K2jWm2T3SthJEx9AIZeouqsCSFOmMz5pzenx8vy4iYyY0IYFBWs5nG3ne51r+Fe9gThtlKa9DDKwvIZdeZ3CMieBpEZEmwXoh+SAI84LLvLyw9eL2DhjgZ+MCObH52bg6ZpuDw+dHQsJll4n3TdLVC1DtorVItie0vPYz4veBzgdqgMIXtrTycxZ7s6xhQECcMhfpjaela5Brqbeq4RFKWO3xuoiUiDcTdB7kV
qO8X6edBUdOCYMqdB/m2Qe4HqVNZWrgI/nbXqg1pLKTQWqqCR7lNb1XLOg+Rx6t8+jxpL006VLaTrEJaoOvCEp0JUBkSmq21q7VXq1lEDnXVga1CBLkuY2gpXtRa8TvZlQZlD1bjSp6hgVmxu//qA6GhXr+0ULKhksS+OxVvrKvnp25t5dM4orhyX0vtP4PWA2wbbP4Dlj6kaaUGRKpsnawZknHn4Dxtej6rzE54ihZyFOAyZ88Vh+XzoxV9Q/OULhNWtJF5TW3VJGAFXvggx2X4dnhDi2EiARxyRx+vjl+9t5c11lUzOiqbL6aGwtpNgi5FXbptwYun64uTxedWWL3MIGPcrjKrrqph0YyE0FqmC0tZYlc0Tk6O+Od9/G5iuqwCOzwuaQV0v5CjrMrkdahvX9g+g8CNVp2ivACvEDFa3vXWOOmpUEMv7jW34piAV/AlNVM/tdYOzQwWAUs5QAZ2UM1RB123vQtGnPdvnAqyqy1pMjgr2xA1Twa7AcBUcqlytntMcrOomBcdAbJ7KpApL6XkvfD4VXOtqgPBkdX5v8XnVmFc8AZWrwBKuxhqVCYFhe+o5Bav332BSP9HV/xuzVdUzOQ6y2BfHwufTueLpFVS2dPOjc3PJjLUyOC6E6BDLSXiyPb8ThR/D7kU9W1dDElSXw+AolXnosatAUEOhmjcCIyD3Qhg6WwWFTCdhbEL0UzLni6PxfkEVT769gEvDd3KXPh+D7oHLnoG8C/09NCHEUZIAj/hWuq7z2MJdvLm2ksxYK8OSwvhkSx3dLg9v3jmJnPhQfw9R9HU+r8oyMph6tpYd6pt4n0/VKWotV9u6wlPVh7ljyWjRdZU9VLUWqgv2ZArtVEGkb9IMKnDktquaSd8MLhkCVMaQx9GT5RQYDmc/pAIrR5st4POqgFrlKtWprXaTGqfJrLKu2ivV1raR16jgVNNOlQnl6lLj8rkPfd2QeHhw59G/N/u/dFnsi2O0raadG55fQ4vNte++/LRILhmTzEUjEomynoSi8LqufnfKl6vOXJWr1e+r0aJ+fyLTVdA2KhMqVqvAkLNdBUqHXAzDLlet2s3W3h+bEP2IzPniaH29s5G7XllPuqmF54MeJ8FWCGf/TK19pOuWEH2eBHjEcSlrsjHnmZVowBPXjqG6zc768lZSo4K586xMqcQv+h5HOzTsgPptKhsnaazK/AkM6zmmu0VlNzXsgK56lS3kdakgT2iC2tK28VWVVRA9GKbcDznn7ymWvYfbrgI0DYWqI1pNAVRvUN3XAKxxaruaMUBdXzPAiCtVMW3jYdpQez0qwOTz7gk0aeo8TTvu+kay2BfHw+fTqe1wUNLQxabKNj7YVMOuhi4CjBqXjE7mjjMzyU3wY9Df44Ldi1U2X+HHKttPM6ogUPI4lQ0YHK22fxnNKugcHA0p+VJrQgxoMueLY7GjtoO/f76TpYWVPBLwAlcaltCVfCYhVzyhttILIfosCfCI47azvpOrn1lJa7fKLggKMGJ3e7lnehY/OT/Pz6MT4iTRddVF7PNfQfMudV/iaFVgurXswE5pBtOeD5b56gNk6gSVadAHPkjKYl/0Bl3X2VHbyRtrK5i/rgq728vZObHcd04249Ki/Ds4t0MVja9creqP1W4CR9uhj40fAVPug2GXqUBqd5MK7lrjVBF7Ifo5mfPF8ShrsjFvRSn6+nk8yEtYDD4cUx4kdNoDKotSCNHnSIBHnJDyZhurS1sYmRLO4LhQfvX+Vl5fU8GvLx7KbVMlwi8GMF1XbZ93fQbFX6mMmsh0iExTtYVi8yAqq88ugGSxL3pbW7eLV1dX8N9lpTTbXEzNjuG2qRlkxlqJDwskMKAPFDz3uNQ2SHur2vro80D9dlj5b5W9Zwg4eEukOUQVeM44EzLOUr/n9lZ1HWusygTsT8XcxWlJ5nxxIjocbl5asILsgj8yy7CG5uBMDGfcisun4XR7cbk9BJgtREdGYLWGoJmt6outoEh/D12
I044EeESv8vp07nm1gAXb6rh+4iBMBgM2p4f0GCtzxqUQFyYtrIXoC2SxL06WbpeHV1dV8MzXJTR19dTryYyxcte0LC4bk4zJ2MfqOPh8Klhbtkx9ILHGqC1cXQ3qVr9VZQJ5HAefGxwNg89XAaD44apI+jcLPHc1qq2b8UOP7gOP16PqhkUM6hMZf6L/kzlf9IaK5m4+fPsFZlf/k1RD4xGP7Q6IxnT5U5iHzDpFoxNCgAR4xEngcHu5+9UCluxsJNhsJNhspL7DicmgMXNIPHdPz2JkSsQB52yqbCM92kp4sLS4FeJUkMW+ONnsLi8FFa3UtjuobbPz2fY6tlZ3kBFj5cpxKQQGGDFoMHZQJKNSI779gv7mdqji7V31PXV8Wnarws67Pu/Z/mUwqcweS6jK/mmvAluDeswSBpPugYl3HdiNz+uGus2qCHvp11C2XNXtikyHEXPULTb3lL9kMXDInC960/rSRiqrKgm2BBBsMWG1BNDVbaemsYXGljZaGyqZ0/w0eYZKyjKvI+2aR9HMwf4ethCnBQnwiJNG1/V9xZZLm2y8vqaC+esq6XR4+Mn5udxxZiY2l4fff7id+eurSI4I4pkbxknrdSFOAVnsi1NN13W+2F7PPxfuYkdtx777zUYD824dz6SsaD+O7gT5vNBcojJ96repIJCzU91C4iFhOESkwabXofAjFdyJylTBIJ9HFXbfmx0UlaW2gsXmqnpfpUtA90HiKBh5NeRdpM43mlU3sf2Ls3vdYGtSWT8h8UfO/rE1qU6DNRtUMCp9qspAOlSXHF2XTKJ+TuZ8caqt3FlN3dsPcZnrAypNg3BP+QmZZ1wAIbH+HpoQA5oEeMQp1d7t5mfvbGbBtjqmZsdQ1myjps3OjZPS+XxbHc02F49cNoIrx6X4e6hCDGiy2Bf+ous6NpcXr0+n0+Hm5hfWUt/u4K3vT2JIYti3X6C/q9kAq59RNXx8HhU8iRsKqePVLSzpwOM762DrO7D5LajdePD1DAFgDga0A4tIB0X2bBmLzFCdbxztquV82XJoLd1zoAbsWeMFhkP4IHW9gCB1fGcd2BpV58D0qeoWna3GGRSpxu/qUh0EzcEqa6m3g0ESYDphMucLf/D6dL7+5A2GrfsFcbQAYIvIJTh3OtrYG1UjCiFEr5IAjzjldF3nldUV/OGj7SSGB/KPq0YzLi2S5i4n9762gZW7m4kJsZAWHUx6tJUJmVHMyIsjJsTy7RcXQhwVWeyLvqK6zc4VT63Ap+u8c9dkUqMkjf+wGneqzmBel7p5nOCygbtbBUGssap+kM8LDdugbis07QJne881AiMgbQoMmqA6/CWOUoGhsuVQsULVHHJ1gatbBXxCEyE4UmUmVaxSz7XXoYpSo6lxDLsUxtwAiSMPfh3dLdBSCoFh6jkCw3vqFvm8KqOpcrUKaNVvV/8OS4RJ98KoayHgEPX8uhqhcYfKpGqvhMHnwaCJJ/yWDxQy5wt/stkdfPHl51QVLGCUexMTjDsJwE175sWEnP8rTPHSfVeI3iIBHuE3DZ0OwgIDDuis4vH6eH1NBVurOyhvsVHcYKOpy4mmwZjUCP50+QjyEnq+4a3vcDB/XSU3TEyX+j1CHANZ7Iu+pKiukzlPr8Dl9TFrWAJz8lMZnxFFQF8rxtwf6brq+tVSqgIjsUMOvQ3raHhcUL8F2iqho0ZtRTMFqlbypkCVxePsgOZiKPwEvE71fLE5EJ6qsnBKl6qW9XxjXRlgVRlBzg51g54spLihPQEfaxwMPlcFnkITVACrdInqgvZNQy+Fmb9V2UunmrNTbbEr+kRlasXmqWwqg0kFuOytKpilAZpRvZbIDPWzej0UL4SKleo9iEhT9ZjG3AAx2cc1HJnzRV/gcHt5a10lry/ZxEVd73CLcQGBuFgRPIOg4RcwZswEjLGDVQZhd4vKeKwpgLYKGHuT6swlhDgiCfCIPk3XdbbXdvDVjgZeXlWOT9d5685JZMaGUNf
u4Jq5Kylr7iY9OpjnbsonOy7U30MWol+Qxb7oa4obOnlheRkfbKqh0+EBIDDAQIglgOm5sfxkVi5xodKJsd/oboEt82HnAvXhrL1KBTRSx0PmNBW4cdlU9pC9Tf3sblEf7FInqOMi03u2Zum6KkC98klVkLqrXtUmMgVB2iRVtyhxtNo+FhShjlv+uMp0Ck1SQaiA4D1ZT12q5pElTAVQgqPUz6BIFWDZO74jbQvz+WDDyyqzKSUfUs5Qr694obqVfq0CXNY49dytZWq8RysoUm2Hc9mgtVy9hzd9qF7rcZA5X/Qluq5T3WZny84SwtY/ybiGdwhEdV3U92QBanuL04MKALttMPwKOOc3EJm2/8XU7XgD10IMMBLgEf1GcUMXVz+zErPJwL+/O4Yfv7WJpi4XP5uVy+Nf7sLh9vH4NaM5Z0i8v4cqRJ8ni33RVzncXr7c0cDuxi66nB4aO518uLmGQJOR+2cOJj89isZOJy02J2MHRTI4XgL7/YKuqyLQJnPvXM/nVbWBgiIPbku/V0cNrJmr6gg5O9X2MlMgmK3qHGenyqTpbgX7nqyavVvQQhMh+xxImwppkw9sWd+4Ez68T2XYGM0qiLS/qCy1RWzobBWsMhhVdlNzsXo8aE9AyWhWQR+fR421tVQFwuKHQ/JYdd7+rxcOvO8YyJwv+jKfy86y1av5esVyrB0l5AW1MWjwSPLGTcOYMgY0gwrYrvi3+p0ZdU3P71RLCXg9MOV+mPyDPTXJhDh9SYBH9Cvbazq4Zu5KOhweQiwm5t06nnFpkdS02fney+vYWt3B1fmpPHRBHpFWM3aXl/c2VFPX4eDWKelEBPfSwlKIfk4W+6I/2d3Yxe8+3M6SnY0HPTY+I4rrJ6YxMSOK2FDLvu6NQhyXznqVgbPrc9i9SBWaBhWQCYxQ2TiNRSob6Pw/qc5mjYVqCxlA1gz/bAn7FjLni/7A59NZsK2Oxxfuoqi+k/ToYO6ens1lY5KpbOlm9cbNpBQ8ykT7EtxBcQQn5qisvc461aEwLFltyxx+5aEzeuxt8MmDKqB66X8OzAQSYoCQAI/odzZUtPKXTwv52QV5jB0Uue9+u8vLY1/u5PmlpYQGmrhgRCKfbKmlrVsVlHjdFwAAIABJREFUgIwJsfC72cO4cESCfAAQpz1Z7Iv+Rtd11pS2YHN5iA0JxGox8vn2el5dXU5lix2AEIuJrLgQbp+awcUjE2WuFyfG54OG7VC+Qv10dYGzS23jmv4LCInz9wiPmsz5oj/x+XQ+317PE1/tYltNB8FmI90ulcWWGx9KgBG21nRyVX4Kv509jGCzSf2eLnhI1fhKGgtn/hhyL+wJ9FQXwPybVZZcQDAYTXD5czB4pv9eqBAngQR4xIBTWNfBr97bSkFFK+cNTeDWqRmEWEz87J3NbKluZ1puLPdOzyY/PeqQ5zvcXnQdgszHlwYtRH8gi30xUPh8OmvLWiiq72R3o41Vu5sprOtkRl4cf7h0OMkRQf4eohB+J3O+6I90XWdRUQOfbKljZEo403PjSI0KxuP18fiXu/j3omIyY6w8ce1YhiaFqaDsptdhyV+hrVxl90z+geoK+MXDEBIPc16A4Gh48wYVuJ32czjrJ1LDRwwYEuARA5Ku6zg9voM6dL2wvIwnFxfT1u1m7KAIbj8zk5lD4jGbDOi6ztvrq/jjxzvQdZ17Z2Rz46T0A64hxEAhi30xUHm8PuatLOfvnxfh03Xy06IYkRLO0MQwIoPNWC1G3F6d5cVNLNnZSGmTjdGpEUzMjGZyVjQjksMxGCTzRwwsMueLgWh5cRM/fHMjjZ1OEsMDyUsIJS8xjMQQE5HlCxhdOY9Ux04AnJnnYbnyGVVUHVTQ56MfwuY3IWEE5MyCzOmqYPreWmFet9rWFRSpMn6E6AckwCNOO90uD/PXVfHcst1UttiJtpq5bEwyRfWdLN3VxBnpkQSbTSzZ2UhyRBD3TM/m0jFJKv1TiAFCFvtioKt
q7eaZJbvZUNlKUV0nbu+B6xiDBqNSIxgcF8KGijZ2NXQBEBdqYebQeGbkxjEuLZJIq9RuE/2fzPlioGrqcvJuQRU7ajvZUdtBSWMXbq+O0aARF2JmZlARrS1NrAyYxN+vHs203P22Vuo6bHgF3/oX0WoK0HQfLkMQDnMUIb4ODK5OdVx4qiriPOYGCNivm6OzE6rXQ/wIsEaf2hcuxGFIgEectrw+na93NfLW2kq+2F6PxWTgoQvyuG5CGgaDxoriJv6yoJDNVe2EBZqYk5+6JzU0iKSIIAKMksop+i9Z7IvTidPjpbTJRqfDQ5fDg0/XGZcWeUDh/aYuJ0t3NfLF9nqWFDVi21PvISPGyvnDEnjwvBxMMu+LfkrmfHG6cHl8tNvdRFnNGPdkY+6q7+Te1zZQVN/JnWdn8v2zsiioaGVFSTMrS5oprOsgRLcxybCds0zbsOpdtOqhhEfHk5uWxJCWrzBUrVFbvCbdA2hQ/AWUrwSfGwLDYfovIf82yfQRficBHiGAVpsLg0EjPCjggPt1XWddeSvzVpSxYGsdHp/6PTAaNKblxHLn2VmckR55QCHPksYunl9Wyqdbann4O0O5bEzKvseaupy8s76Ki0clSV0I4Vey2Bfi8BxuLxsr29hQ0cbasha+KmxgRl4c//7umH3ZnE6PCgBZTLKNV/R9MueL053D7eX3H23ntdUV++4zmwyMGxRJfnokeQlh5CWGkh5tpaKlm3fWV/FOQRW17Q7SooL41yQbo3Y/C6VL1MlxQ/FkncN2Yy455W8RWPk1xA6B8/4IxgCoWgNV66BuizreaAZTIJitkDAckvPVdrCYHKn/I3qVBHiEOEotNhc76zupaOmmuKGLt9dX0WJzMSo1gqxYK06Pj+YuJ6t2t2A2GUiOCKKypZvnbz6Ds3NiqWt3cN1zqyhptBFg1LgqP5V7pmeT9I1Az97fNen+Ik4mWewLcfReWVXOw//bysiUCO6fOZhPNtfy6dY6fLrOuUPjmT0qicFxoXQ63dicXlKjgkgMlyC+6DtkzhdCWbi9nq017YzPiGLsoMgj1tr0+nSW7Gzgjx/tYHeTje+MSuK3Ew14A0J4ebuH19dU0NTlwmiAX2SVcmP7XAI6ynsuEJMDiaNVTR+PEzwOcLSrTl+OdnVMYDjkXADDLoOs6WCynOR3QAx0EuAR4jjZXV7eLqji1VXldDk9mE0GggKMzBwSzw2T0rCYDFz1zCrKm238fc4o/vxpIS02F3+9YiQrSpp4a10lPh0mZERx/rAEBseHsKSokc+21dFmd/Ojc3O4bkLavvRSIXqTLPaFODYLttZx3xsbcHl8hFhMXDA8AZPRwKdba2nrdh90fF5CKNPz4hiWFEZSRBCJ4YHoOticHrpdXjJirYQFBhzimYTofTLnC3H8nB4vTy/ezZOLiwkwaDg9Pry6zjl5cVyVn8rq0hZeW12Bz2PnpymFRMUl0RIxEi0ogiCzkZTIIDJirCSFB6ki/j4fNBdD9TooXQpFH6uAjyUcci+AnPMhawYERfTei6jbAl89ApZQmP4LiMrovWuLPkUCPEKcRA0dDi57agXVbXbCgwKYd+t4RqeqybqqtZvXVlfw2bY6ShptAAQYNSZlxeD2+Fi5u5kRyeH8dvYwxg6KkIwe0atksS/Esdtc1UZFSzfn5MUTZFbf+ro8PpaXNNHc5SLEYiLYbKSwroNFhY2sLWvZt7X3mwwaDEkMY2JmNNeOTyU7LvRUvhRxmpE5X4gTV9pk44kvdxFlNXPjpHQGRQfve6zF5uLF5aW8vKqc1kME/UFtCcuMsZKfHsnU7BgmZcYQHhyA3W6nav0n6NveZ1DjYgI9HaAZYdAkSB4DtiboqIaOGnWhzGmQPRMyzlJbvgCcXdBZBwYjRKT1bPuyt8GiR2DtcxAYobKIvG4Y/z0468GermJiwJAAjxAnWUljF3//vIh7pw9maFLYIY8pbuiitMnG+IwowoMC0HWdDzfX8oe
PttPY6SQvIZTLxyaTEx/K5qp2NlW2ATApK5rJWTHkJYQetq2v16czf10lowdFkJdw6OcXpx9Z7Atx8tmcHipbu6ltc1DTbseoaVgtJswmAztqO1i9u4X1Fa24vT5mj0rivnMGkxUbsu/8VpuLf3yxk0+31vLri4dyyehkP74a0Z/JnC/EqeP16bg8PpweL11OD5UtdsqabZQ22Siq62RtWQvdLi+aBimRQVS32tn7XYARL5Mtpfwko4wR3avRmoogJAHCkyEsCdx2KP0a3N2qrk94KnQ1wN6OXwCmIIjNUVvEShaBvUUVgJ7+C7VVbPGfYMMrKptn5DVqa1j6VPVv0e9JgEeIPqzT4eb9jTW8s76KjXuCOgDZcSH4fDq7m1TmT7DZSEaMlczYECZnRTN7VBJWi4nadjv3v7GRNaUtWM1Gnrp+HGfnxPrr5Yg+RBb7QvQNLTYXc7/ezbwVZTg8XoYkhDEhM4poq5lnl5bS5fSQFh3M7kYbN01K45cXDaXT4WbpriYqW7o5Y08dCbNJinSKw5M5X4i+w+XxsamqjeXFTRTVdTI4LoQRKRGMTAmny+nhtx9sY+muJvISQrlnWhZ2t4+adju1bQ7sbi+JVo2Rvu3kda0mTm8iNCYVQhMgNBG8TmgohIbt0FgE0Vlw/iOQOOrAQdRvV5k9xQtVVo/BpAo/JwyHiEEqCyg8BbrqoWknNO2Clt0qsOT1gM8DmqayjHIvUNlEAcdQe67oU/XcmdPVdjRz8LefI46KBHiE6CdKGruo73AwPDl8X92G2nY7y4ub2VbTzu5GG8UNXVS32QmxmLhwRAJfbK/H6fHxs1l5vLm2kp31nfz58hHMyU897PMU1XXy2bY6wgJNJEWoPcOD4yWiP9DIYl+IvqWpy8mbaytZUdLE+vJWHG4fU7NjePg7Q8mIsfLXTwt5blkpMSEWmm1O9l+WBQUYGZcWSU58KIPjQwg2G9lZ30lRXScNnU48Xh2vTyc3IZQHz8s9YFuBOD3InC9E/6HrOgu21vH7j7ZT2+7Yd39MiAWrxUh9hwOH27fv/gkZUdwyJZ2ZQ+IxGQ34fDrFjV2sL2/Fp+sMTwonNyH00AWl3Q6oXA27F6vMoOZdPQWg9xeSoIJF5hDVJcxgVOeWLQO3DQKCYdBE0Azg6lb3mQJh6gOQM0sFgwB8XhVYWvp3tQ1N96pzs2eqW/wwiM09OJvI41JBqKPpONa4E9Y9r7ay5V7w7ccPMBLgEWIA0XWdgopWXllVwceba8mOC+Hf3x1DZmwInQ43d71SwLLiJuJCLVgCDASajKRFWxmaGEp8eCAfbqph1e6Wg647Z1wKv5k9jBCLag9c2mRjW00703Lj9t0n+hdZ7AvRdzk9XurbnaRGBR1Qf+2TLbW8W1DFqJQIzs6NJS3KyurSZpYXN7G+opWSBht2t2rfbjJoZMZaSQwPIsCooWkay3Y14fXp3DIlnXtmZB91kWeH20uA0SBF//sxmfOF6H+6XR521HYQGxJIfLgFi0kFaHRdp8Phob7DweKiBuatKKe6zU5SeCDZ8aFsqGil0+E54Fomg0Z2XAhn5cRy9RmpB2wHBmjsdLJ0VyPJEUHkJxgwtlequj/WOIjJVt2+DsXjhLKlULQAKleBIUDVBQoIVoWkW0og6xyY9WcIiYN3bleZO2NvhFl/gaq1sOND2PERdNX1XDcsBYIjVQ0heyu4usAaq7qNDb8SUsf3BI32qi6AZf9Q12JP7GLK/TDjYTCehM8r3S2wc4Eav61RBbNyLzx4XKeYBHiEGKCcHi9mo+GADwcuj49nl+6mqrUbh9uHzemhpFHV//Hpah/w9RPTuCo/FZ+uU9NmZ8HWOp5eUkJqVDAPzMzh0621fL69Hl2HUIuJK/NTuOaMQWTHhRzV4r+ypZvGLifp0VYigwOkeLSfyGJfiIHH59OpbrOrLl0x1oO2bdW1O/jbZ0W8U1BFZHAA984YzPUTB+370NBudxMYYNj37+o
2O08vLuHNtZX4dJ2E8EDSooO5e1o2U7JjTvnrE8dP5nwhBi6vT+fLHfW8tLKcpi4nYwZFMi5N3UwGja3V7Wyr6WBTVRsrS5rx+HTGZ0RxdX4qHQ43n26tY11Zy746QNFWMzOHxHPesHimZMccsZX8kQfmhjXPwuK/qIye4BjoboYL/wb5txx4rM8HraXQWKhuDYXg7ISgSFUIOjAC6reqgIrHobaRxQ4Bj11lEjk7oXGH6kQ24Xsw7hZY+iis+y+kTYUr/wuh8cf3OlrLVLcze4sKODnaVaZT2XKVgRSWAiaz2sKWfiac9wdIGnN01+6ogW3vQ1u5er+8LrX9LXXCwe/RUZIAjxACh9tLVaudjBjrIYM0a0pbeODNjfu6gd0wMY1JWdHMX1fJx1tqcXv1fZ0BhiaFMSMvjrNzYgnd79vh9eWtzP26ZF9wCCAs0MS1Ewbx0Kw8CfScYrLYF+L0taWqnb8uKGRZcRPJEUFkxYVQVNdBfYcTgwbp0VaSI4NYtbsZgMvHpBAdYqa6zU5BRStVrXbumzGY+84ZLFk9/YTM+UIIgIZOB2+vr+LNtZWUN3cDkBMfwqzhiZw7JJ6Klm4+21bHosIGOp0erGYj0/LimDUsgel5x5m5b2ui/eOHMVWtwnLFU5jSJh7/C3B0QNEnsPUdVR/IFAQmi6r/M2gS5N8Kgfs1ldn4Onz0gMpAGn0tRA+GmMEQlaW2e3lcqm6R1622lxkDVBaSqwsKP4Jt70HNhp7raUbVvj4kQbWzH/IdFczxeWD9i7D4zyqIlXEWWMLUNQ0mteUsPFUVyw5PgeYS9RrKlgG6CkwZA3qef9glcN4fj+stkgCPEOKotNvdFJS3MiEzimBzz+Te0OFg8c5GShq6KG7ooqCildZuNwFGjfRoK90uLzaXh7Zu977g0OjUCMpbullb2sKCbXX86Nwc7jtn8L5rbqtpJ8pqJjG8p1jb+vIW/rO4hPOGJTBnXMoBASG7y7uvZfH+dF2XwNFhyGJfCLFsVxOPf7kTm9NLXkIoOQmhdDs97NrT2XFcWiR3T88mOaJnLu52efj1+9t4p6CKSZnR/GRWLmNSIw4513q8PtrsbmJCLPvuq+9w8OhnRRQ3dnH/OYOZlhu37zGvT6fb5SHEYjqqubu5y0lIoGlfxpE4PJnzhRD78/l0NlS2Eh5kJjsu5KDHXR4fK0qa+GxbPV9sr6Opy0WAUSM1MpjkyCBSIoNIiQxmaGIYw5PDiQ21HHT93U02Pt1Sy8dbaimsU12+ksIDuXVqBteMH3TqyjzUb4MPfgC1m1Qg5lgkjVHbwnIvVEWszdYjb8FytMOyf0LJV6rekM+jfjra1Dau/UUPhhFXqi1nMdnH/roOQwI8Qohe5fWpOkALt9dT1mzDajZhtZjIiQ/h8rEpWPebzHVd58fzN/FuQTV/uXwEk7Ni+P1H21m4ox6jQWPWsATm5Kfwv401vLehGovJgNPj46ycWB65dDhFdZ28uKKM5SVN/G72MG6clL7v2i+vLOP/Pivi5snp3DUt64CglJDFvhDixMxfV8lvP9iGzeUlOy6E2aOSCA004fXptNvdbKhoo6CilW6Xl8FxIZwzJB6zUePZpaV4fTqxoRaq2+xcMDyBS0Yns7iogS+219Nsc2HQIDQwgJEp4dx3zmDOSI866PlX7W7m1hfXkh5t5cVbzyAuNHDfY/UdDmJCLJJdtB+Z84UQx2vv2v6rwgbKm21Ut9qparXTbHPtOyYhLJDsuBA6HG4aOpw0dTnx7NnzlZ8WyYUjEkmKCOSF5WWsLm0hNNDEhcMTCQzo2Urs9ul0Ojx0Odx0OjxEBJs5d2gc5wyJP+CLgkOpabNjtZgIDzpCbTmvG1rL1faqllJ1n8kMRotqOa971TE+t8rUyZwGURnH+7YdzO1QdY3aq9S2s/jhJ6VejwR4hBB+5fb6uH3eOpbuasRkNGAyaNwzPZsOu5vX11TQ4fBgNhq4/cwM7p6ezbsFVfz
l00K6XaqQaEJYIIkRgWysbOOxq0dzyehk3l5fxYPzN5EWHUx5czfxYRZun5qJy+ujtt2Ox6szOTuGswbHEBFsPuYxVzSr9NWxaRGMSzv4g0d/IIt9IcSJ6nS4+XhzLfPXV7G+vHXf/QYN8hLCyE+PJCE8kGW7mlhT2oLHp3PhiAQemjWE+HALzy0t5V9f7sLp8RFiMTE9L45hSWHYnB5au10s2FpPU5eTMwfHcNe0LCZmRGMwaKwobuLWeWtJCAukodNJdIiZl26dgMVk4NHPinh3QzWTs6L593fHEmU9eI7XdZ2adgeJYYEYTpMgkMz5Qoje1ulws72mgy3V7Wytbqe0yUZ4sJm4UAtxoRZSIoOZnhd7QEY+wMbKNuZ+XcLKkmb2jzCYDBqhgQGEWEyEBpqoaOmmqtWOpvUEiS4dnUzkfvN6Q4eDRz8vYv76KkIsJu6bMZgbJ6ed1pmdEuARQvidzenh3tcKiAw289NZeSSEq29iu10eFhc1MiwpjLRo677jK1u6eWllGaNTIzlvWDxen85N/13D+vJWbjszg2e/3s3krBieuymfbTXt/O7D7WyuUi0fI4MD8Olqy5lBg0lZ0dw7fTCTsqKPOEaH28v89VW8va6STXuuFRpo4v17phzUiaA/kMW+EKI3tXe78ek6JqOG2WQ4aHHdbnfTYnOREWM94P6aNjtlTTbGpUcedI7d5eWVVeU8vaSEZpuL5IggZg6J4421laRHW3n1jglUtdq59cW1+HQdu8uLDlw8IpGPttQSG2Jh7o3jGJbU0/2lsqWbX76/la93NjIoKpjvThjEpaOT8ek6LTYXbq+P4cnhBBiPohVvPyJzvhCiv9F1nR21nXy+vY7PttWzo7YDs9HAecPimZOfyubKNv6zpAS318cNE9PZ3dTF4iI1tz90QR4XDE845HZfXddp6HQSG2IZkEF+CfAIIQaEDoeba+euYltNB+PSInn5tvH7tmX5fDo17XairRaCzEa8Pp1NVW0sKmzgrXWV1Hc4mZwVzeVjU3C4vXQ43JiNBtKjraRFB7NydzNPLSqhrsPB0MQwZo9OIj8tku+9vJ6I4ADev2fKt7Ybbu92ExpoOuAPSafDzfLiZmYOicN0ij9MyGJfCNFf2F1ePt9ex7sF1Szd1UhOfCiv3j6B6D0p+7sbu7j3tQ3kxIfw4Pm5pEQGs7Gyje+/vJ42u4up2TFkxYZgMmo8v6wUo6Zx85R01pa1sqa05aDniwgOYEZeHNNy40iOCCQuNJD4sMCDupL1JzLnCyH6ux21Hby5tpL3NlTTbncDMGtYAj+/MG/fF8Ff72zkkY93UFTfSXiQ2uo7IjmcvMQwqlvtrC9vpaCilRabi7hQC7OGJ3DB8ETGZ0Qddluvx+uj3e7e9zenr5MAjxBiwGjqcvLm2kqun5h25D24+3G4vby2uoKnFpfQ1OU87HH5aZE8cG4Ok7Oi930bsGp3M9c/t5qzcmJ59sb8Q/5haOpy8qdPdvBuQTVDEsP40bk5nJMXx3sbqvnzp4U0dTkPKjJ9KshiXwjRH7XYXASbjUfVtreh08FfPi1kW3UHpU02XF4fM4fE8/tLhpG0p3D0rvpOFhc1YrWYiLKa8fh8fLWjgS8LG/Z9gAAIMGoMTQxjzKBI8hJCiQgOICwogG6nl201HWyvbcdiMnLhiASm5cZhNhoorOtkbVkLI1PCGTMo8qS9J0dD5nwhxEDhcHtZXNRAbKjlkKUSvD6djzbXsGp3C5ur2iiq69xXDygzxsrYNDWPry9vZVFRAw63jyirmWFJYWTGWMmKCyE2xML22g7WlbWysbINu9vLBcMT+PF5uYcsSt2XSIBHCCFQfyyq2+yEWkyEBgbgcHspa7ZR3txNQnggEzKiDpnm+fKqcn79/lbSo4PJSwhjcHwIEcFmzEaNDoeHZ5aUYHd7mZOfyoriJsqau4m2mmm2uRiVGkFkcADLdjXxv3unHLCNYK/GTie17XaGJ4X3ahq
pLPaFEKcTr0+ntdtFtNV8VB263F4fu+q7qO900NjhZHeTjY2VrWyuat9XA24vTYOMaCvtdjfNNhdWs5EAk4G2bhUgCrWYeOfuyeTEh+47p6CiFYfbS3ZsCLGhFipb7KwoaWJrTTsXjUj61m3D/8/enUfZVdf53n//ap7nTFWBVCDIKGNEEEEQVKAd0FacRdun6dtN30f6qi3e1u7W272afuzrdK/autRunFptBHFCEWRolcEkhDGBhJBA5qQqlapKzVW/549zghECVIo6tc/Z+/1aq9apM1Tx3bUr3118zm84VPZ8SVk1Mj7J+p2DdLbUPmNdtn2jueUgblm7g/U7B9mwax+Do7mdtsrLAscuauS0w1upqSrnW3duYnh8kj8+dTF/cd4yFjXXUF1R9qzXlN59Y9z2yE5+tXYndVXlfOTCY+ZkFJABjyS9ADFGvnPPE9z+yC7W7xxkY88+pg5ol2cc0cY/XHICy+Y3MjE5xXWrtvDj+7fyuhM7efNpi9k7PM6rP3sHHQ3V3HDFWVRVlLFzYIQf3ruFmx7awcon9hAjHN5Wx1tfchh/9OJFLGiqOei28IfCP/Yl6dBNTE6xY2CUvUPj9A2PUV1RxjELm6ivrmBicoq7NvTyswe3MTE5xRlHtHPEvAb+9BsrqK4o44dXnEVzbSX/9LO1fP03jz/1PffvEAlQVV7G2OQU73jp4Xz0omNofJ7pv9Nlz5ek57d/fZ7te0dYNr/hD3b/7Rkc5Yu3PcY379rEWL5nhwC1leXUV1fQUltJS10lzbVV9O4b5d4n+4gR5jVWs3donKbaSj71lhM57+j5BT0GAx5JmkVjE1MMj08yNjHFVIzMb6x+3neLf/nwDv70Gyt41xmHMz4Ruf7eLYxNTnF8ZxOvPm4hi1pquG7VZu7a8Pu1Imory+nuqOfGD5w9ozr9Y1+S5sZ9T/bx1q/cyXGLmqgoL+Oex3t578u6edVxC556Y6C7vZ6zlrXT1VLHp3/5CF/79eO0N1TT2VzDwOgE+0Yn+MI7TmX5QbaMnw57viTNjq19w9y8ZgeDoxMMj00yNDbJ4MgEe4dzwf/e4QmqK8o49+h5nH/MAo7vbOKRHQNc+d3VPLJjgPecuYQrL3gRMUbGJqcYn4j0DY+xfe8IOwZG2dk/wosWNPK6kzpnVN90+33F871gpkIIhwHfABYAEfhKjPFzIYQ24HtAN7ARuDTGuOfZvo8kFYOqirJDXojzVcct4E2ndPGtu56guqKMS1+ymD85aylHHLA716XLD+Px3fu45/EeevaN0TM4RgoX/pek1DnpsBY+fenJ/MW3V1FTWcZn3noSbzxlMQBnLet4xuv/5o+O4+IXL+L//Go9k1ORxW11NFRVTHttOUlS4XS21PKeM7sP6WuOXdTEDX95Fp/6RS7A/8adm571tWUB3nb64TMOeKarYCN4QgiLgEUxxlUhhEZgJXAJ8F6gN8Z4dQjhKqA1xviR5/pepvuSStXg6AQ3PrCN849d8Iz5wYXgu7mSNLduf3QXnc01HHXAWjxzxZ4vScVhxcZeVj/ZR1VFGZXlZVSVl9FcW8mCphoWNFXT3lD9rDt6TUfiI3hijNuAbfnPB0IIa4Au4A3AufmXXQPcBjxnwCNJpaqhuoK3LD8s6TIkSQXyihfNS7oESVLClne3zXi67Ww6tPkGMxRC6AZOAe4GFuTDH4Dt5KZwHexrLg8hrAghrNi1a9dclClJmoEQwmEhhFtDCA+HEB4KIXwg/3hbCOGXIYR1+dtk9xOWJEmSUqzgAU8IoQH4AXBljLH/wOdibn7YQeeIxRi/EmNcHmNcPm+e74xIUhGbAD4YYzwOOAO4IoRwHHAVcEuM8Sjglvx9SZIkSQVQ0IAnhFBJLtz5dozxuvzDO/Lr8+xfp2dnIWuQJBVWjHFbjHFV/vMB4MApudfkX3YNuXXYJEmSJBVAwQKekNs/+GvAmhjjpw946kfAZfnPLwNuKFQNkqS55ZRcSZI
kKRmFHMFzFvBu4JUhhNX5j4uBq4FXhRDWARfk70uSSpxTciUp/Vx3TZKKVyF30fo18GxJeWnyAAAgAElEQVT7gJ1fqP+uJGnuPdeU3BjjNqfkSlJq7F93bVUIoRFYGUL4JfBecuuuXR1CuIrcumvulCtJc2hOdtGSJKWXU3IlKTtcd02SipcBjyTphXJKriRlkOuuSVJxKdgULUlSNjglV5Ky5+nrruUGc+bEGGMI4VnXXQO+ArB8+fKDvkaSNDOO4JEkSZI0bc+17lr+edddk6QEGPBIkiRJmhbXXZOk4hVyO9cWtxDCLmDTDL+8A9g9i+UUs6wca1aOEzzWNCr0cS6JMZbsXuP2+2nzWNMnK8cJHutsmvOeH0J4OfBfwAPAVP7h/0luHZ7vA4eT6+OXxhh7n+d7pbXnW9vMFHNtUNz1WdvMFHNt8If1Tavfl0TA80KEEFbEGJcnXcdcyMqxZuU4wWNNo6wcZxKy9LP1WNMnK8cJHqtmRzH/bK1tZoq5Niju+qxtZoq5NphZfU7RkiRJkiRJKnEGPJIkSZIkSSUuCwHPV5IuYA5l5VizcpzgsaZRVo4zCVn62Xqs6ZOV4wSPVbOjmH+21jYzxVwbFHd91jYzxVwbzKC+1K/BI0mSJEmSlHZZGMEjSZIkSZKUagY8kiRJkiRJJS61AU8I4cIQwiMhhPUhhKuSrmc2hRAOCyHcGkJ4OITwUAjhA/nH20IIvwwhrMvftiZd62wIIZSHEO4NIfwkf39pCOHu/Ln9XgihKukaZ0MIoSWEcG0IYW0IYU0I4cwUn9O/yv/uPhhC+I8QQk1azmsI4eshhJ0hhAcPeOyg5zHkfD5/zPeHEE5NrvLSltaen7V+D/b8tJ1X+739frYVW78/lN+DBGor2mtIvhfcE0K4L1/bJ/KPF01/KNbrUQhhYwjhgRDC6hDCivxjiZ/TA+oryutbCOHo/M9s/0d/COHKYqgtX9+sXC9TGfCEEMqBLwAXAccBbw8hHJdsVbNqAvhgjPE44AzgivzxXQXcEmM8Crglfz8NPgCsOeD+PwOfiTEuA/YA70+kqtn3OeDnMcZjgJPIHXPqzmkIoQv4f4HlMcYTgHLgbaTnvP47cOHTHnu283gRcFT+43LgS3NUY6qkvOdnrd+DPT8159V+b7+fbUXa7/+d6f8ezLVivoaMAq+MMZ4EnAxcGEI4g+LqD8V8PTovxnhyjHF5/n4xnNP9ivL6FmN8JP8zOxk4DRgCri+G2mb1ehljTN0HcCbwiwPufxT4aNJ1FfB4bwBeBTwCLMo/tgh4JOnaZuHYFpP7h/ZK4CdAAHYDFQc716X6ATQDj5Nf+PyAx9N4TruAJ4E2oCJ/Xl+TpvMKdAMPPt95BL4MvP1gr/PjkH7emen5ae73+WOx56fovNrv7fcF+HkXZb+f7u9B0h/Feg0B6oBVwEuLpT8U8/UI2Ah0PO2xojinpXJ9A14N/KZYapvN62UqR/Dw+x/Qfpvzj6VOCKEbOAW4G1gQY9yWf2o7sCChsmbTZ4G/Bqby99uBvhjjRP5+Ws7tUmAX8G/5oaBfDSHUk8JzGmPcAvwL8ASwDdgLrCSd53W/ZzuPmelVBZaJn2MG+j3Y81N1Xu339vsCKJWfY9H9Wy7Ga0h+CtRqYCfwS+Axiqc/FPP1KAI3hRBWhhAuzz9WFOeU0rm+vQ34j/znidc2m9fLtAY8mRBCaAB+AFwZY+w/8LmYi/liIoXNkhDCa4GdMcaVSdcyByqAU4EvxRhPAfbxtOGBaTinAPl5rW8gdwHoBOp55tDm1ErLedTcSnu/B3s+Kez59vvSP4d64Yrh96BYryExxsmYmy6zGDgdOCaJOp6uBK5HL48xnkpuquIVIYRzDnwy4d+5or++5dexeT3wn09/LqnaZvN6mdaAZwtw2AH3F+cfS40QQiW5Rv3tGON1+Yd3hBAW5Z9fRC4NL2VnAa8PIWwEvktuiOTngJYQQkX+NWk5t5uBzTHGu/P
3ryXXHNN2TgEuAB6PMe6KMY4D15E712k8r/s923lMfa+aI6n+OWak34M9P409335vv59tpfJzLJp/y6VwDYkx9gG3kpuCUgz9oaivR/nRHsQYd5JbQ+Z0iueclsL17SJgVYxxR/5+MdQ2a9fLtAY8vwOOyq86XUVuCNaPEq5p1oQQAvA1YE2M8dMHPPUj4LL855eRm2dbsmKMH40xLo4xdpM7h7+KMb6T3AXgzfmXlfxxAsQYtwNPhhCOzj90PvAwKTuneU8AZ4QQ6vK/y/uPNXXn9QDPdh5/BLwn5JwB7D1giKimL7U9Pyv9Huz5pLPn2+/t97OtVPp9UfxbLuZrSAhhXgihJf95Lbm1gdZQBP2hmK9HIYT6EELj/s/JrSXzIEVwTqFkrm9v5/fTs6A4apu96+VcLBqUxAdwMfAoubmcf5N0PbN8bC8nN3TsfmB1/uNicnNDbwHWATcDbUnXOovHfC7wk/znRwD3AOvJDa2rTrq+WTrGk4EV+fP6Q6A1recU+ASwltwF6ZtAdVrOK7kLxjZgnNy7GO9/tvNIbsG+L+T71APkVs5P/BhK8SOtPT+L/T5/3Pb8lJxX+739vgA/96Lq94fye5BAbUV7DQFOBO7N1/Yg8Lf5x4uqPxTb9Shfw335j4f2/xsohnN6QI1Fe30jN/WpB2g+4LFiqW1Wrpch/80kSZIkSZJUotI6RUuSJEmSJCkzDHgkSZIkSZJKnAGPJEmSJElSiTPgkSRJkiRJKnEGPJIkSZIkSSXOgEcqkBDCuSGEnyRdhySp8Oz5kpQN9nsVMwMeSZIkSZKkEmfAo8wLIbwrhHBPCGF1COHLIYTyEMJgCOEzIYSHQgi3hBDm5V97cgjhrhDC/SGE60MIrfnHl4UQbg4h3BdCWBVCODL/7RtCCNeGENaGEL4dQgiJHagkyZ4vSRlhv1cWGfAo00IIxwJvBc6KMZ4MTALvBOqBFTHG44Hbgb/Lf8k3gI/EGE8EHjjg8W8DX4gxngS8DNiWf/wU4ErgOOAI4KyCH5Qk6aDs+ZKUDfZ7ZVVF0gVICTsfOA34XT54rwV2AlPA9/Kv+RZwXQihGWiJMd6ef/wa4D9DCI1AV4zxeoAY4whA/vvdE2PcnL+/GugGfl34w5IkHYQ9X5KywX6vTDLgUdYF4JoY40f/4MEQPv6018UZfv/RAz6fxH9zkpQke74kZYP9XpnkFC1l3S3Am0MI8wFCCG0hhCXk/m28Of+adwC/jjHuBfaEEM7OP/5u4PYY4wCwOYRwSf57VIcQ6ub0KCRJ02HPl6RssN8rk0walWkxxodDCB8DbgohlAHjwBXAPuD0/HM7yc3hBbgM+Nd8c98AvC//+LuBL4cQPpn/Hm+Zw8OQJE2DPV+SssF+r6wKMc50VJqUXiGEwRhjQ9J1SJIKz54vSdlgv1faOUVLkiRJkiSpxDmCR5IkSZIkqcQ5gkeSJEmSJKnEGfBIkiRJkiSVOAMeSZIkSZKkEmfAI0mSJEmSVOIMeCRJkiRJkkqcAY8kSZIkSVKJM+CRJEmSJEkqcQY8kiRJkiRJJc6AR5IkSZIkqcQZ8EiSJEmSJJU4Ax5JkiRJkqQSZ8AjzVAI4agQwkgI4VtJ1yJJKowQwm35Xj+Y/3gk6ZokSZIOxoBHmrkvAL9LughJUsH9ZYyxIf9xdNLFSJIkHYwBjzQDIYS3AX3ALUnXIkmSJEmSAY90iEIITcAngf+RdC2SpDnxTyGE3SGE34QQzk26GEmSpIMx4JEO3f8CvhZj3Jx0IZKkgvsIcATQBXwF+HEI4chkS5IkSXomAx7pEIQQTgYuAD6TdC2SpMKLMd4dYxyIMY7GGK8BfgNcnHRdkiRJT1eRdAFSiTkX6AaeCCEANADlIYTjYoynJliXJGluRCAkXYQkSdLThRhj0jVIJSOEUAc0HfDQh8gFPn8eY9yVSFGSpIIIIbQALwVuByaAt5KbpnVKjPHRJGuTJEl6OkfwSIcgxjgEDO2/H0I
YBEYMdyQplSqBfwCOASaBtcAlhjuSJKkYOYJHkiRJkiSpxLnIsiRJkiRJUokz4JEkSZIkSSpxBjySJEmSJEklzoBHkiRJkiSpxJXELlodHR2xu7s76TIkqeitXLlyd4xxXtJ1zJT9XpKmr9R7viRpdpVEwNPd3c2KFSuSLkOSil4IYVPSNbwQ9ntJmr5S7/mSpNnlFC1JkiRJkqQSZ8AjSZIkSZJU4gx4JEmSJEmSSlxJrMEjSdM1Pj7O5s2bGRkZSbqUgqqpqWHx4sVUVlYmXYokJSIr/R7s+ZKk6THgkZQqmzdvprGxke7ubkIISZdTEDFGenp62Lx5M0uXLk26HElKRBb6PdjzJUnT5xQtSakyMjJCe3t7qv/YDyHQ3t6eiXetJenZZKHfgz1fkjR9BjySUiftf+xDNo5Rkp5PVnphVo5TkvTCGPBIkiRJkiSVOAMeSZpFfX19fPGLXzzkr7v44ovp6+srQEWSpEKw30uSio0BjyTNomf7g39iYuI5v+5nP/sZLS0thSpLkjTL7PeSpGKT6l207t/cR3lZ4PjO5qRLkZSAT/z4IR7e2j+r3/O4zib+7nXHP+vzV111FY899hgnn3wylZWV1NTU0Nraytq1a3n00Ue55JJLePLJJxkZGeEDH/gAl19+OQDd3d2sWLGCwcFBLrroIl7+8pfz29/+lq6uLm644QZqa2tn9TjSZmvfMOt3DnL2UR2uVSFlkP1ekqSUj+D562vv57M3r0u6DEkZcvXVV3PkkUeyevVqPvWpT7Fq1So+97nP8eijjwLw9a9/nZUrV7JixQo+//nP09PT84zvsW7dOq644goeeughWlpa+MEPfjDXh1FyfnL/Vt7z9XsYGptMuhRJGWG/lyQVm1SP4GlvqKJncDTpMiQl5LneeZ0rp59+OkuXLn3q/uc//3muv/56AJ588knWrVtHe3v7H3zN0qVLOfnkkwE47bTT2Lhx45zVW6oaayoB6B8Zp7461Zc2SQdhv5ckKe0BT3019+1xETtJyamvr3/q89tuu42bb76ZO++8k7q6Os4991xGRkae8TXV1dVPfV5eXs7w8PCc1FrKmvYHPMMTLHJWrqQE2O8lSUlL9RSt3AiesaTLkJQhjY2NDAwMHPS5vXv30traSl1dHWvXruWuu+6a4+rSq6k2935F/8h4wpVIygr7vSSp2KR6BE9HQzWDoxOMjE9SU1medDmSMqC9vZ2zzjqLE044gdraWhYsWPDUcxdeeCH/+q//yrHHHsvRRx/NGWeckWCl6fL7ETwGPJLmhv1eklRsUh3wtNdXAdCzb4yuFnckkDQ3vvOd7xz08erqam688caDPrd/3YWOjg4efPDBpx7/0Ic+NOv1pVFTbS7gGRh57u2JJWk22e8lScUk5VO0cvOae52mJUmp1ljjFC1JkiRlW6oDnrb8CJ7d+9xJS5LS7KmAxylakiRJyqhUBzwdDfkpWo7gkaRUq64op6ayjH6naEmSJCmjUh3w7J+i1TPoCB5JSrummkoGnKIlSZKkjEp1wFNfVU51RRk9+xzBI0lp11RbSf+wI3gkSZKUTakOeEIIdDRUs9sRPJKUeo01FS6yLEmSpMxKdcAD0N5QRa8jeCTNkb6+Pr74xS/O6Gs/+9nPMjQ0NMsVZUdTTaWLLEuaM/Z7SVKxSX3A01Zf5SLLkuaMf/Anp6m20kWWJc0Z+70kqdhUJF1AobXXV/Po9oGky5CUhBuvgu0PzO73XPhiuOjqZ336qquu4rHHHuPkk0/mVa96FfPnz+f73/8+o6OjvPGNb+QTn/gE+/bt49JLL2Xz5s1MTk7y8Y9/nB07drB161bOO+88Ojo6uPXWW2e37gxoqqlwkWUpq+z3kiSlP+DpaKhi974xYoyEEJIuR1LKXX311Tz44IOsXr2am266iWuvvZZ77rmHGCOvf/3rueOOO9i1axednZ389Kc/BWDv3r00Nzfz6U9/mltvvZWOjo6Ej6I0NdbkFlm
230uaC/Z7SVKxSX3A095QxdjEFIOjEzTWVCZdjqS59BzvvM6Fm266iZtuuolTTjkFgMHBQdatW8fZZ5/NBz/4QT7ykY/w2te+lrPPPjvROtOiqbaCsckpRiemqKksT7ocSXPJfi9JUgYCnvpqAHoGxwx4JM2pGCMf/ehH+bM/+7NnPLdq1Sp+9rOf8bGPfYzzzz+fv/3bv02gwnRpyvf4/uFxAx5Jc8p+L0kqBqlfZLm9oQqAHnfSkjQHGhsbGRjIrfv1mte8hq9//esMDg4CsGXLFnbu3MnWrVupq6vjXe96Fx/+8IdZtWrVM75Wh66pNh/wuA6PpDlgv5ckFZvUj+DpaNg/gmc04UokZUF7eztnnXUWJ5xwAhdddBHveMc7OPPMMwFoaGjgW9/6FuvXr+fDH/4wZWVlVFZW8qUvfQmAyy+/nAsvvJDOzk4X3ZyBpprcJc2dtCTNBfu9JKnYhBhj0jU8r+XLl8cVK1bM6Gu39g3zsqt/xT+96cW8/fTDZ7kyScVmzZo1HHvssUmXMScOdqwhhJUxxuUJlfSCvZB+v3LTHv74S7/l39/3Es49ev4sVyap2GSp30M6e74kaXalfopWW31+ipYjeCQp1ZprHcEjSZKk7Ep9wFNTWU5jdQW7B12DR5LS7MBFliVJkqSsSX3AA7mFll1kWcqOUph6+kJl4RgPlYssS9mTlV6YleOUJL0wGQl4qund5xQtKQtqamro6elJ9R/DMUZ6enqoqalJupSiUl1RRlV5GQNO0ZIyIQv9Huz5kqTpS/0uWgDt9VU80TuUdBmS5sDixYvZvHkzu3btSrqUgqqpqWHx4sVJl1FUQgg01lQ4RUvKiKz0e7DnS5KmJxsBT0MVq57oS7oMSXOgsrKSpUuXJl2GEtJUW+kiy1JG2O8lSfpD2ZiiVZ+bojU1le4hvJKUdU2O4JEkSVJGZSPgaahiKkKff/RLUqrlRvDY6yVJkpQ9GQl4qgHoGXShZUlKs6aaShdZliRJUiZlIuDpqK8CcKt0SUo5F1mWJElSVmUi4Pn9CB4DHklKM6doSZIkKasyEfC0PTWCxylakpRmTTUVjIxPMToxmXQpkiRJ0pzKRMDTWldJCLDbETySlE6DO2HTnTTVVAC4Do8kSZIyJxMBT0V5Ga11VS6yLElptfo78G8X0lqZm55lwCNJkqSsyUTAA9BeX+UaPJKUVnXtALSFQQAXWpYkSVLmZCfgaaii1120JCmd6toAaGEAwIWWJUmSlDnpDnjW/BjW3QzkdtLa7SLLkpRO+RE8TbEfgP5hp2hJkiQpW9Id8Nz+/8E9XwacoiVJqZYPeBqm8gGPI3gkSZKUMekOeJq6oH8rAO311ewdHmdsYirhoiRJs642N0WrbrwPgAEDHkmSJGVMQQOeEEJLCOHaEMLaEMKaEMKZIYS2EMIvQwjr8retBSugqRP6twC5NXgA9gw5ikeSZlvi/b62BQhUjfVRFpyiJUmSpOwp9AiezwE/jzEeA5wErAGuAm6JMR4F3JK/XxhNnTC8B8aG6MgHPLvdKl2SCiHZfl9WDrWthOFemmornaIlSZKkzClYwBNCaAbOAb4GEGMcizH2AW8Arsm/7BrgkkLVQFNX7nZgG+0N1QDupCVJs6wo+j3kdtIa6qGpptJt0iVJkpQ5hRzBsxTYBfxbCOHeEMJXQwj1wIIY47b8a7YDCw72xSGEy0MIK0IIK3bt2jWzCpo6c7f9W2ivz43gcaFlSZp1L6jfz5q6dhjqpam2gv4Rp2hJkiQpWwoZ8FQApwJfijGeAuzjacPzY4wRiAf74hjjV2KMy2OMy+fNmzezCvaP4Onf+tQIHqdoSdKse0H9flYCfcgttDzUS2N1pYssS5IkKXMKGfBsBjbHGO/O37+W3P8A7AghLALI3+4sWAVNi3K3/VtoqqmgsjzQ4xQtSZptL6jfz0qgD/kRPD25ETwusixJkqSMKVjAE2PcDjwZQjg6/9D5wMPAj4DL8o9
dBtxQqBqoqoeaFujfSgiBtvoqehzBI0mzqij6PeTW4Bnupam6wkWWJUmSlDkVBf7+/x34dgihCtgAvI9cqPT9EML7gU3ApQWtoKkL+rcC0F5f7Ro8klQYyff7ujaYGKG9etJFliVJkpQ5BQ14YoyrgeUHeer8Qv53/0BTJ/RvAaC9ocopWpJUAEXR7+vaAZhfPsS+sUkmJqeoKC/kTGRJkiSpeKT/L9+mzqdG8HQ0VNOzzylakpRK+YCnvWwAgMFR1+GRJElSdmQg4OmCfbtgYpT2+iqnaElSWtW2AdAWcgGPCy1LkiQpSzIQ8HTmbge20dZQxdDYJENj/tEvSamTH8HTTD7gcaFlSZIkZUh2Ap7+rXTUVwOwe8BRPJKUOnW5ETyNU/0ALrQsSZKkTMlAwNOVu+3fyqKWGgC27R1OsCBJUkHUtADhqYBnz5ABjyRJkrIjAwHP/hE8W+hqqQVgS58BjySlTnkF1LZQP7kXgN2DLqovSZKk7Eh/wFPTBFWN0L+VznzAs9WAR5LSqbaN6vE+yssCuwYMeCRJkpQd6Q94IL9V+hZqKsvpaKhyBI8kpVVdO2XDvbTXVxnwSJIkKVMyFPBsBaCzpZYtfSMJFyRJKoi6dhjqYV5jNbucoiVJkqQMyUjA0/VUwNPVUsuWPUMJFyRJKoi6NhjqzQU8juCRJElShmQk4OmEge0wOU5nSy1b+0aIMSZdlSRptuUDno6GahdZliRJUqZkJ+AhwuAOOltqGR6fpM/tcyUpferaYWKYRXWR3YOjTE0Z5kuSJCkbMhLwdOVu+7e6VbokpVltGwCLq4cZn4zsHTbMlyRJUjZkJODpzN32bzHgkaQ0q2sHYGHlPgAXWpYkSVJmZCzg2UpXaz7g2WPAI0mpU5cbwTOvbBCA3S60LEmSpIzIRsBT2woVtdC/lda6Smoqy9jqCB5JSp/8CJ62MkfwSJIkKVuyEfCEkBvF07+FEEJuq3QDHklKn3zA0xz7AdwqXZIkSZmRjYAH8gHPVoD8VukGPJKUOjUtuZvxPqoqygx4JEmSlBkZCni6ngp4FrfWsqVvJOGCJEmzrrwCaloIw73Ma6g24JEkSVJmZCjg6YSBbTA1SWdzLbsHRxkZn0y6KknSbKtrh6EeOhqrXYNHkiRJmZGtgGdqAvbtemonrW17HcUjSalT1wZDjuCRJElStmQo4OnK3fZvobPFrdIlKbXyI3jmNVaz2xE8kiRJyogMBTydudv+rXTlAx4XWpakFKrNj+BprKZn3xgTk1NJVyRJkiQVXIYCnv0jeLaysLmGEGCzAY8kpU9dGwz3Mq+hihihd2gs6YokSZKkgstOwFPXDuVV0L+VyvIyFjTWOIJHktKorh3Gh1hQFwFch0eSJEmZkJ2Ap6wMGhc9tVV6V2utAY8kpVFdGwALK4cAAx5JkiRlQ3YCHshN08oHPJ0ttWwx4JGk9KlrB2Be2T7AgEeSJEnZkLGApxP6twDQ1VLLtr4RpqZiwkVJkmZVPuBpDf0A7B50DR5JkiSlXwYDnq0QI10tNYxNTrmFriSlTf08AGpGe6mvKncEjyRJkjIhYwFPF0yOwlAPXa25rdKdpiVJKdO4MHc7sI15jdXsMsiXJElSBmQr4Gk5PHe7ZyOdLQY8kpRK1Y1Q1QAD23MBz8BI0hVJkiRJBZetgKd9We6257GnAh530pKkFGpceEDA4wgeSZIkpV+2Ap7Wbghl0LOepppKGmsq2NrnO7uSlDoN+YCnodpFliVJkpQJ2Qp4KqqgZQn0rAdyO2lt3uMIHklKncaFMLCNjoZq9g6PMzoxmXRFkiRJUkFlK+CB3DStAwIep2hJUgrtn6LVUAW4VbokSZLSL6MBz2MQI50ttS6yLElp1LgQJoZZWDMO4Do8kiRJSr0MBjxHwvg+GNhOV2ste4fHGRydSLoqSdJsalwEwKKyPQDsNuCRJElSymUw4Nm/k9b6p3bS2uYoHklKl8aFALTHXMCza9CAR5I
kSemW6YCnKx/wbDbgkaR0acgFPM0TuwGnaEmSJCn9shfwNHVBRc0fBDwutCxJKdO4AIDKoZ201lWyo38k4YIkSZKkwspewFNWBm1HQs9jzG+sprI8sMWt0iUpXaoboaoRBrazoKnGgEeSJEmpl72AB3ILLfesp6ws0NVSyxO9Q0lXJEmabY0LYWAbC5tr2G7AI0mSpJTLaMCzDPY8DpMTLGmvZ1OPAY8kpU7jQhjcwaLmGrbvNeCRJElSumU34JmagL5NdLfXsbFnHzHGpKuSJM2m/AieBU017B4cY2xiKumKJEmSpILJbsAD0PMYS9rrGRiZYM/QeLI1SZJmV+NCGNjOoqZqAHYOOIpHkiRJ6ZXxgGc93R11AGzs2ZdgQZKkWdewECZG6KodA3CaliRJklItmwFPXRvUtEDPepa01wOwyYBHktKlcSEAneV7AVxoWZIkSamWzYAnhNwonp71LG6tpSzAxt0utCxJqdK4CID59AGO4JEkSVK6ZTPggXzA8xjVFeV0ttQ6gkeS0iY/gqd+bCc1lWUGPJIkSUq1bAc8/ZthbIgl7XVsdKt0SZqxEMLGEMIDIYTVIYQV+cfaQgi/DCGsy9+2zmlR+YAnDGxnYVONU7QkSZKUahkOeI7M3fZuYEl7vSN4JOmFOy/GeHKMcXn+/lXALTHGo4Bb8vfnTlU9VDfB4A4WNteww4BHkiRJKVbQgKco39Hd78CdtNrr2DM0zl63Spek2fQG4Jr859cAl8x5BY0LYWAbC5tq2OYULUmSJKXYXIzgKa53dPdrOyJ3e+BOWr2O4pGkGYrATSGElSGEy/OPLYgxbst/vh1YcLAvDCFcHkJYEUJYsWvXrtmtqmEBDGxnQXMNO/tHmZqKs/v9JUmSpCKRxBSt5N/RBahugMZO6HmM7nzA4zo8kjRjL48xngpcBFwRQjjnwCdjjJFcCPQMMcavxBiXxxiXz5s3b3aralwEA9tY1FTD2OQUvUNjs/v9JUmSpCJR6K/HVJ4AACAASURBVICnON/R3a/9SOhZz+FtdQBs2u0IHkmaiRjjlvztTuB64HRgRwhhEUD+duecF9a4EAZ2sLCpGnCrdEmSJKVXoQOe4nxHd7/2ZdCzntqqchY21TiCR5JmIIRQH0Jo3P858GrgQeBHwGX5l10G3DDnxTUuhMlROmtGAVxoWZIkSalVUchvfuA7uiGEP3hHN8a4LbF3dPdrXwbDvTDUy5L2OnfSkqSZWQBcH0KA3HXlOzHGn4cQfgd8P4TwfmATcOmcV5bfKr2zrA/AhZYlSZKUWgULePLv4pbFGAcOeEf3k/z+Hd2rSeod3f2e2kkrtw7PLWuTy5okqVTFGDcAJx3k8R7g/Lmv6ACNiwBoneyhLDiCR5IkSelVyBE8xfuO7n4HbJW+pOMl7B4cZXB0gobqgg5skiTNlfwInvKhncxrnO8aPJIkSUqtgiUZRf2O7n6tSyCUQ886uuefC8Cmnn0c39mcbF2SpNnRkAt4GNjGwuYlbHcEjyRJklIqiW3Si0d5Jcw7GrY/wJL2/E5aLrQsSelRVQc1LbB3Cwubqh3BI0mSpNTKdsAD0HUqbFnJkvxW6RtdaFmS0qW1G/ZsZFFzrSN4JEmSlFoGPJ2nwlAPDcNb6GioZtNuR/BIUqrkA54FTTUMjEywb3Qi6YokSZKkWWfA03Va7nbLSrrb6xzBI0lp09oNfU+wsCm37JyjeCRJkpRGBjwLjofyatiyiiXt9a7BI0lp07YUpsY5vHwvADtch0eSJEkpZMBTXgmLTnpqBM/2/hGGxyaTrkqSNFtauwHojNsA2GbAI0mSpBQy4IHcNK1t99HdVg3AE72O4pGk1GhdCkD72FbAKVqSJElKJwMeyAU840McXbYFcCctSUqVpi4oq6Cq/wmaayvdKl2SJEmpZMADua3SgcVDawDYZMAjSelRXgHNh8Gex1nYVOMIHkmSJKWSAQ9A2xFQ00LdrtW01lWy0YWWJSld2pb
Cno0sbK5h297hpKuRJEmSZp0BD0AIuVE8T+2k5QgeSUqV1m7ofZzu9jo27R4ixph0RZIkSdKsMuDZr+s02PkwL2oNbNztCB5JSpXWpTDSx9HNkwyMTrB7cCzpiiRJkqRZZcCzX9dpECdZXr2ZrXuHGRqbSLoiSdJsyW+VfnRNLwAbdg0mWIwkSZI0+wx49uvMLbR8avkGYoTVT/YlXJAkadbkA54lYScAj+92Kq4kSZLSxYBnv8YF0HwYh4+sJQRYsXFP0hVJkmZLPuBpG9tKVXmZAY8kSZJSx4DnQJ2nULXjXo5e0MiKTQY8kpQaNU1Q107ZnsdZ0l7HBgMeSZIkpYwBz4G6ToM9Gzm7K7Bq0x4mp9xlRZJSozW3VfrSjnpH8EiSJCl1DHgO1HUaAOc1bmZwdIK12/sTLkiSNGtau2HP4yydV8+mnn2G+JIkSUoVA54DdZ4MBI6P6wFY6TQtSUqPtqWwdzNHtlUxPhnZsmc46YokSZKkWWPAc6DqRph3DE2997OwqYbfudCyJKVHazfEKY6uyY3O3LDbrdIlSZKUHtMKeEIIHwghNIWcr4UQVoUQXl3o4hLRdRphy0qWL2lh5cbepKuRpDmX2p6/f6v0sh2AW6VLkiQpXaY7gudPYoz9wKuBVuDdwNUFqypJh50OQz2c37GHrXtH2NLnEH5JmZPOnt+6FIDmkS00VlcY8EiSJClVphvwhPztxcA3Y4wPHfBYuhxxLgBncD8AKxzFIyl70tnzGxdBeTUhv9CyAY8kSZLSZLoBz8oQwk3k/tj/RQihEZgqXFkJal0CbUewYPedNFRXsMJ1eCRlTzp7fllZrsfnt0rfsMuAR5IkSekx3YDn/cBVwEtijENAJfC+glWVtCPOo2zjb1h+WD2/cwSPpOxJb89vXQo9Gziio4Gte4cZGZ9MuiJJkiRpVkw34DkTeCTG2BdCeBfwMWBv4cpK2JHnwfg+/qh1C4/sGGDv8HjSFUnSXEpvz59/LOx+lCPaq4gRNvUMJV2RJEmSNCumG/B8CRgKIZwEfBB4DPhGwapKWvfZEMo4I95HjHDvE07TkpQp6e35C46HqXGOqdi/k5ZbpUuSJCkdphvwTMQYI/AG4P/GGL8ANBaurITVtkDXaXT23kV5WXAdHklZk96ev+B4AA4b3wDABhdaliRJUkpMN+AZCCF8lNxWuT8NIZSRW5MhvY44j/Jt93L6wsCKTa7DIylT0tvz24+Csgpqetcyv7Gax11oWZIkSSkx3YDnrcAo8Ccxxu3AYuBTBauqGBx5HsQpLmnZwOon+xibKP0NZCRpmtLb8yuqoONo2PEQSzvcKl2SJEnpMa2AJ/8H/reB5hDCa4GRGGM61mN4NotfAlUNnBHvZ2R8ioe2pmN9UUl6Pqnv+QuOgx0Pc8Q8Ax5JkiSlx7QCnhDCpcA9wFuAS4G7QwhvLmRhiSuvhO6X09V7FwD3PO40LUnZkPqev+B46N/MMc1T9OwbY8++saQrkiRJkl6w6U7R+hvgJTHGy2KM7wFOBz5euLKKxBHnUbF3I+fMG+S/1u1OuhpJmivp7vnzcwstn1S9FYA12/uTrEaSJEmaFdMNeMpijDsPuN9zCF9buo58JQBva1vPPY/3sm90IuGCJGlOpLvn53fSOnJqIwBrtw0kWIwkSZI0O6b7B/vPQwi/CCG8N4TwXuCnwM8KV1aR6DgKmro4fep+xianuGtDT9IVSdJcSHfPb+qEmmYa9z5KR0MVa7Y5gkeSJEmlb7qLLH8Y+ApwYv7jKzHGjxSysKIQAhxxHu277qK+MnDbI7uSrkiSCi71PT8EWHAC7HyYYxc1OUVLkiRJqVAx3RfGGH8A/KCAtRSnI88jrP4Wb1/cwy8erSHGSAgh6aokqaBS3/MXHA+r/4NjT2rk3+/cxMTkFBXl6ZmFJkmSpOx5zr9mQwgDIYT+g3wMhBCy8Zbn0lcAgdfWPcSTvcNs7BlKuiJJKohM9fz5x8HYAKc2DzA
2MeV26ZIkSSp5zzmCJ8bYOFeFFK2GeXDYSzmu7zbgbG57ZCdLO5YmXZUkzbpM9fwFJwBwfPmTQC0Pb+vnqAXZOXxJkiSlj+PRp+P4S6jqWcMr2vZw+6OuwyNJJW/+MQAsGt1AZXlgjTtpSZIkqcQZ8EzHsa8H4LLm1dz5WA8j45MJFyRJekGqG6G1m4pdD3PkvAbWutCyJEmSSpwBz3Q0d8FhL+X04f9idGKKux/vTboiSdILNf942PEwxy1qcqt0SZIklTwDnuk67hIa+tZydMV2bntkZ9LVSJJeqAXHQ886TphfxY7+UXr3jSVdkSRJkjRjBjzTddwbAPh/2u93HR5JSoOu0yBOsbzycQBH8UiSJKmkGfBMV3MXLD6dV07+lg279vFkr9ulS1JJO/ylABw58gBgwCNJkqTSZsBzKI5/I+2Dj9IdtnGbo3gkqbTVtsL846jf/jvmNVa7k5YkSZJKmgHPochP03p7/Spuf8SAR5JK3uFnwJP3cNzCekfwSJIkqaQZ8ByK/DSt11Xew2/W73a7dEk6QAihPIRwbwjhJ/n7S0MId4cQ1ocQvhdCqEq6xmc4/GUw2s85zTtZv3OQ8cmppCuSJEmSZsSA51Adfwmdw+uYP7GFOzf0JF2NJBWTDwBrDrj/z8BnYozLgD3A+xOp6rkcfgYAy8MjjE1OsWHXvoQLkiRJkmbGgOdQ5adpvaHyHm5ZsyPhYiSpOIQQFgN/BHw1fz8ArwSuzb/kGuCSZKp7Di2HQdNijhi6H4CVm/YkXJAkSZI0MwY8h6p5MSx+CW+uWcGv1uwkxph0RZJUDD4L/DWwf45TO9AXY5zI398MdB3sC0MIl4cQVoQQVuzalcD6ZoefQcPOFSxqquYOF9CXJElSiTLgmYkT38rhY+uZ1/8gD7sop6SMCyG8FtgZY1w5k6+PMX4lxrg8xrh83rx5s1zdNCw5kzCwjUuWTvCbx3Yz4To8kiRJKkEFD3hKctHN53PS25iqauCyipu4Zc3OpKuRpKSdBbw+hLAR+C65qVmfA1pCCBX51ywGtiRT3vM4/EwALmx8nIGRCVY/2ZdwQZIkSdKhm4sRPKW36ObzqW6k7OR38rryu1jx8CNJVyNJiYoxfjTGuDjG2A28DfhVjPGdwK3Am/Mvuwy4IaESn9u8Y6GmmWPHHqIs4DQtSZIklaSCBjwlu+jmdJz+p1QywYnbf8jOgZGkq5GkYvQR4H+EENaTW5PnawnXc3BlZXDYGVRtvYeTD2vhdgMeSZIklaBCj+Ap3UU3n0/HUQwuPod3VtzCbQ8X56wDSZprMcbbYoyvzX++IcZ4eoxxWYzxLTHG0aTre1aHnwG7H+E1S6u4f8teeveNJV2RJEmSdEgKFvCU/KKb01D/8j9nUeild+UPky5FkvRCLHkZAK+qX0+M8Ov1uxMuSJIkSTo0hRzBU9qLbk5DeNFr6K3q5LQd/8nI+GTS5UiSZqrrNKhqpLvvLlrqKl2HR5IkSSWnYAFPyS+6OR1l5ew57t28JKzh/pW/SboaSdJMlVfCEa+g7LFfcdaR7dzx6C5ijElXJUmSJE3bXOyi9XSlsejmNHW98nKGYxXhnq8kXYok6YVYdj7sfYLXdg6yc2CUtdsHkq5IkiRJmrY5CXhKdtHNaahp6mBF0wW8uPcXxKHepMuRJM3UkecDcBarAdxNS5IkSSUliRE8qTN0yvupYYxdN3826VIkSTPVugTaj6Jp8+2cuLiZH9+3NemKJEmSpGkz4JkFL3npOdwYz6Tp3i/DwPaky5EkzdSyC2DTb/jjF7fz0NZ+Ht3hNC1JkiSVBgOeWdBWX8Xul36Esqlxtt3w90mXI0maqWUXwMQIl7RtpLwscP29JbvRoyRJkjLGgGeWvOVV5/CjyguZv/57jO9Ym3Q5kqSZ6D4LKmpo3nIH5xzVwQ33bmFqyt20JEmSVPwMeGZJTWU57Rd/jKFYzdZrr0q6HEnSTFTWwpKXwfqbeeOpi9m6d4S
7Hu9JuipJkiTpeRnwzKJzTzmWG1vexpJdt7J37e1JlyNJmollF8DuR3l15xgN1RVcv8ppWpIkSSp+BjyzKITAqW/5n2yPrez90UchOqxfkkrOsgsAqNn0Ky48YSE3Prid4bHJhIuSJEmSnpsBzyxbtng+93T/Nw4feognfvPdpMuRJB2qjhdByxJY+1PedEoXg6MT3LxmR9JVSZIkSc/JgKcAXnHplaznMKpu/XumRoeSLkeSdChCgOPfCBtu44yFgUXNNVy3anPSVUmSJEnPyYCnAJrra3jyjE+wcHI7D3//b5MuR5J0qE54E0xNUPbIj3nDyV3csW43uwZGk65KkiRJelYGPAVy7mvexO21F3D0Y1+n5/H7ki5HknQoFp4IbUfCg9fxplO7mJyK/Pi+rUlXJUmSJD0rA54CCSGw5O2fZjDWsuf7V8DUVNIlSZKmK4TcKJ6N/8WL6oc5oauJ6+91Ny1JkiQVLwOeAuo+fAkrX/RXLBt+gDU3fjHpciRJh+L4N0Gcgodv4I2nLOaBLXtZt2Mg6aokSZKkgzLgKbBzLv0r7i8/nq7f/RP7erclXY4kaboWHAfzjoGHruf1J3VSXha4zlE8kiRJKlIGPAVWVVlO2es+S00c5rFvX5l0OZKkQ3H8m2DTb5kXeznnqA5+eO8WpqZi0lVJkiRJz2DAMwdOOPl0fr3gXZzY83Meu+snSZcjSZquE94ERHj4h7zx1MVs2zvCXRt6kq5KkiRJegYDnjly2rv/kU2hk7pf/BUj+/qTLkeSNB0dR8HCF8N9/8Grj51PQ3WF07QkSZJUlAx45khzYyN9F/xvFsWdrL7mQ0mXI0marpf8KWy7j5onbuPiFy/kxge2MTAynnRVkiRJ0h8w4JlDJ511MXd3vInTd3yfB+66OelyJEnTcdLboakLbv8U73rp4ewbm+Q7dz+RdFWSJEnSHzDgmWMvvux/s6usnYZfXMnA4GDS5UiSnk9FFZx1JTx5FydOPsRZy9r56q8fZ2R8MunKJEmSpKcY8MyxusY2+s//FEvjk9x9zd8kXY4kaTpOfTc0LIA7PsVfnLuMXQOjXLfKtXgkSZJUPAx4EnDUy9/EQx0X8oqd3+TuO29PuhxJ0vOprIWX/XfYcBsvq97ASYub+fIdjzExOZV0ZZIkSRJgwJOYZe/5P+wra6Djpr9kZN/epMuRJD2f094HtW2EO/6FPz/3SDb1DPGzB7cnXZUkSZIEGPAkprppPlte+Xm6p55ky79dBlO+CyxJRa26Ac68Atb9glfXb+DIefV86bbHiDEmXZkkSZJkwJOk48++hGvb/xtH7r6Vfb/8x6TLkSQ9nzP+Apq6KPvFVfy3c7pZs62fO9btTroqSZIkyYAnaae//WNcN3UO9Xf+Czx8Q9LlSJKeS1UdvOqTsP1+LuFWOhqq+Oadm5KuSpIkSTLgSdrSeQ2sXf5J7p1axtR1fwbbH0y6JEnScznhj+HwM6m89R94zykt/GrtDjbvGUq6KkmSJGWcAU8RuOKCE/hw+V/TN1VH/O7bYe/mpEuSJD2bEOCif4ahHt438Z8A/Mc9TyRclCRJkrLOgKcINNdV8q4LTue9w1cysa8Xvn4R9G5IuixJ0rNZdBKc+h4a7/sa7zxihO/97klGJyaTrkqSJEkZZsBTJN55xhL2dZzIX1Z8gjg2mAt5dq5NuixJ0rN55cehupEPjf5fegdH+LlbpkuSJClBBjxForK8jL973fH8Ys8i/nH+vxCJ8O8Xw9bVSZcmSTqYhnlw4dU0717FB5pu41t3udiyJEmSkmPAU0TOedE8rrroGL76SA1fXfYFqKyDa14Hm+5MujRJ0sGc+FZYdgF/Mflttm16lDXb+pOuSJIkSRllwFNk/uycI3j76Yfxj3eN8ZPlX4eG+fCNN8CD1yVdmiTp6UKA136WirIy/rnqq/z5N1ewctOepKuSJElSBhnwFJkQAp98wwmcfVQHV964m3te+V3oOhWufR/
8+jMQY9IlSpIO1HIY4VWf4KzwABeO3cRb/vW3/PPP17rosiRJkuaUAU8Rqiwv4wvvPJUj5tXz/v/cwPrXfBNO+GO4+e/hJ1fC5ETSJUr6/9u77zi5q3r/46+zvbdsTTZl03snnd47SBEEDMpVUe4VRf2h3mtDvdfeBWkKKNJ7EYi0AAmppG56ssn23uvszPn9cSaNFFImmf3uvp+Pxzyy852Z735mHpPPnP3MOZ8jsq/pt0DB6dzJ3/jq2E7ueWcbn7l/CR0+FXlERERE5ORQgaeHSomL5q83n0JsVCTX/W0VhbN/A6d+A1Y8BP+8Blqqwh2iiIjsFhEBVz2IiU/na3V38edPDWXFznq+9fQarGZeioiIiMhJoAJPD5afnsATX5pFdGQE192/hBXD/wsu+yMUfQB3z4INL4U7RBER2S0pC655CBqLuXj7T7jz/FG8tLqM37+5JdyRiYiIiEgfoAJPDzcsK4mnbp1NRmIMNz24hPeTL4IvLYTUfHjiRnjuVuhoDHeYIiICMGgWnPtj2Pgyt0a+wNXT8vndv7fwwqrScEcmIiIiIr2cCjwekJ+ewJO3zmZQRgKff2gZr1WlwX+8Caf9P1jzJNw9B4reD3eYIiICMOvLMP4qzFt38bPsBcwoyOBbT69hTUlDuCMTERERkV5MBR6PyE6O4/EvzmJs/xRu/ccKbn7kI1YO/wrcsgCiYuHhS2HhLyEQCHeoIiJ9mzFw5b0w4Vqi3v4xDw98hazEGL709xXUtHSGOzoRERER6aVU4PGQtIQYHvvCLP7fBaNYXdzAp+5exE2vd/PRRc/DuE/BWz+BR6+Clupwhyoi0rdFRrsiz/RbiF/6R14qeIrWtha+8uhKfH4V4kVEREQk9FTg8Zj4mEi+csZw3r/zLL5z4WgKy5q48oG13O67jfYLfuMaMP9lnvtXRETCJyICLv41nPoNMjY+xgdpP8RXtISfvrIh3JGJiIiISC+kAo9HJcZG8aXTh/HenWdy+9kjeHltBectLGDDJc9BbBI8chmseizcYYqI9G3GwNnfhxufIdl08WzsDxmw9Cc8v2x7uCMTERERkV5GBR6PS4iJ4uvnjuTJL83CWrjkqUbuHfUAdvA8eP5WePeXYG24wxSRXswYE2eMWWqMWW2MWW+M+VHweIExZokxZqsx5gljTEy4Yw2b4efAVxZjp32OL0S9Sv7L17N1565wRyUiIiIivYgKPL3EtMEZvHr7qVw0IY//e6uM+V3fxDfuWnj7J/DS7eDvDneIItJ7dQJnWWsnAZOBC4wxs4CfA7+11g4H6oFbwhhj+MWlEHHpb2m86C9MNFuJefgCWiu2hjsqEREREeklVODpRVLiovnDdZP5xVUTWbSjmasqPkv7rK/Dyofh8euhtTbcIYpIL2SdluDV6ODFAmcBTwePPwxcEYbwepzUGdez+bx/kOJvIHD/2djiZeEOSURERER6ARV4ehljDNeeMpB7b5rGxsoWLis8k6Zzfglb/w2/Gw//+jY0loQ7TBHpZYwxkcaYVUAVsADYBjRYa3dPHywBBoQrvp5m/JwLeWXGI9T5YvD/7WIofDHcIYmIiIiIx6nA00udPSaHhz53CmUN7Vy8aDhln3kLxl4OS++D30+GF26Dmi3hDlNEeglrrd9aOxnIB2YAo4/0scaYLxpjlhtjlldXV5+wGHua6y88mz8MvYc13QOxT34WFv1RPdNERERE5JipwNOLzRmWyaNfmEVTezdXPlXL33O/TcXNi2HazbD2afjTdPjndbBjof6oEJGQsNY2AG8Ds4E0Y0xU8KZ8oPQQj7nPWjvdWjs9KyvrJEUafhERhp/ecCa/yPkl/wrMhDf+xxXf2+vDHZqIiIiIeJAKPL3c5IFpPPml2STHRfO9F9Yz656tXLDlMu6e9Bx1p9wBJUvh4Uvh3lNh9RMQCIQ7ZBHxGGNMljEmLfhzPHAusAFX6Lk6eLf5wAvhibDniouO5J6b5/Hr5Du5j6u
wqx/H/4dpNH34CJ0+NccXERERkSNnrAdmbkyfPt0uX7483GF4mrWW7TWtvLmhkn9vqGLFznpioyL46w3jmdXyJiz+M9RsgnFXwhX3QHR8uEMWkWNgjFlhrZ1+kn/nRFwT5UjcFwdPWmvvMsYMBR4HMoCPgButtZ2HO1dfzffFdW1cefcislo389PoB5kasZUVdjRNU77I6RffRER0391hXkQOLRw5X0REeq4TVuAxxsQBC4FYIAp42lr7A2NMAW7A3w9YAdxkre063Ln66oD/RCpvbOfmvy5jR20rd39mKueMyYZFf4AF34f8GXD9Y5CYGe4wReQoeX2w35fzfUVjB0uL6ujo8jFox1OM2HQP/fw11EX0w0y9ifTpV0POeDAm3KGKSA/h9ZwvIiKhdSILPAZItNa2GGOigfeB24E7gGettY8bY/4CrLbW3nO4c/XlAf+JVN/axc1/W8r6siZ+fe0kLp88AApfgGe/CMl5cMNTkDki3GGKyFHw+mBf+X4v6/fxwb8ewy7/K3PtKiKMheT+MOJcmPFFyB0f7hBFJMy8nvNFRCS0TlgPHuu0BK9GBy8WOAt4Onj8YeCKExWDHF56YgyPfmEW0wan87UnVvHPJbvcTlvzX4bOZnjgHFj/HHQfdoKViIicACYymnmXfJZRd7zGtYl/5SeRt9GVOwXWPQP3nQELfwV+9ekREREREeeENlk2xkQaY1YBVcACYBvQYK3dPSItAQYc4rF9ctvcky0pNoqHPz+DM0dl893n1nLNXxbxSn0+vs8vgKQceOpm+NUIeOl2KPpATZhFRE6y7JQ4fnjD2TzcMY/b/Hdgb18DYy6Bt34Mfz0fylZpJ0QREREROTlNloO7qzwHfA94yFo7PHh8IPAva+1h55lryv6J19Ud4JHFRTy8uIjiunZyU+KYP7M/V6VtJavoRczGV8DX6pYHDD0DCk6FgtMgNT+8gYvIfrw+XV/5/tAeeG87P3llAz++Yjw3zRoMa5+GV74BHQ1uWe2ws2DURTD6YvXpEekjvJ7zRUQktE7aLlrGmO8D7cCdQK61ttsYMxv4obX2/MM9VgP+k8cfsLy9sYqHFxfx3pYaAJJjo5icG8Pl8auY61tMbv1yTFute0B6AUy7GWbfBpHRYYtbRByvD/aV7w8tELDc/NAylmyv5edXTWTKoDQGxrQRseVfsO0t2Pa2K/aMOB8u+wMk54Y7ZBE5wbye80VEJLROZJPlLMBnrW0wxsQDbwA/B+YDz+zTZHmNtfbuw51LA/7w2F7dwpIddRSWNVFY3sSG8ibauvx8Yd5gvjvdYoreh82vwfZ3IHscXPo7GDgj3GGL9GleH+wr3x9edXMnl//pfcoaOwBXgJ+Qn8qcYf2YMzSdSeVPE/nmDyEqFi76FYy7EiKjwhu0iJwwXs/5IiISWieywDMR10Q5Etfr50lr7V3GmKG4bdIzgI+AG621nYc7lwb8PUMgYLnr5UIeWlTEV88ewR3njnQ3bHwVXv0WNJXAtM/BOT+A+PTwBivSR3l9sK98/8k6u/1sqmimsKyJ9WVNLN9Zz4byJgCyk2P5xxX9GLnom1C6HKITIG8y5E+D0Ze6IryWb4n0Gl7P+SIiElonbYnW8dCAv+cIBCzfeXYtTywv5s4LRvPlM4a5Gzpb4J3/gw/vhrg0OOU/3CU5J7wBi/QxXh/sK98fm9qWThZvr+V/X9lAc2c3D82fyrS292HXEihZBhVrwN8FeZPcFuvjr4Lo+HCHLSLHyes5X0REQksFHjlq/oDljidX8cKqMn546Vhunluw98by1fDOz2DTv1xPngnXwKyvQO5h+2iLSIh4fbCvfH98ShvaueH+D6lq7uT+z05n7vBMd0NnC6x5ApbeD9UbIDIG+k9xM3qGnArDztZSLhEP8nrOFxGR0FKBR46Jzx/gP/+5ktfXV/LN80byIM/6dAAAIABJREFUpdOHER0ZsfcOtdvgw3tg1aPga3NLBMZdAWMvh4y
h4QtcpJfz+mBf+f74VTV3cNMDS9lR08p3LhrN/NlDiIgILsuyForegy0LoHgplH0E/k5IyYdTPg9Tb4bEfkf+ywJ+qN0KmSO19EskDLye80VEJLRU4JFj1tnt544nV/PKmnLG5KXw86smMDE/bf87tdXBqn/C+mehdIU7ljsBxl8N0+arV49IiHl9sK98HxoNbV3c/vgq3t1czSlD0vn5VRMZmpUEuKW2xoAxBro7Yeu/Ycm9sONdiIiGjAJXiM8YCkPPOHB2j7VQud7NCFr7NDSXwdTPwiW/g4jIsDxfkb7K6zlfRERCSwUeOW6vravg+y+so6alk8/NLeCOc0eSGHuQqf4Nu6DwRSh83vWEiE6EKTfCrC+7PyhE5Lh5fbCvfB861lqeWVnKXS+tp7M7wMCMBOpbu6hv62JkTjJ/vmEqw4JFHwCqNsCaJ6FmM9QXQd12NwMzKRcmXw+pA2HXYti5CJpKISIKhp/req2teAjGXAqfegCi4w4fWHcn7HgPhp7ulvKKyDHzes4XEZHQUoFHQqKpw8cvXtvIPz7cRWJMJJnJsaTERZMSH0VKXDRDMhMZnZvM2LwUCjITiapeD4v/7L79tX4YfQnMvR3yNUYROR5eH+wr34deVVMHv1mwmcZ2H+mJMaTERfPk8mK6ugP86ppJXDA+9+AP7O6CLa/DR4/Cljdcrk7KgcFzXN+esVfsXc61+G54/TtQcBqc8Z2950jKcTOBjHEzfza/Dq99G+p3wMxb4cKfn/gXQKQX83rOFxGR0FKBR0JqeVEdL64uo7HdR3NHN03tPurbuthV14bP795rMVERzCzI4AeXjmV4XDMsvQ+W/xU6GmHQHJj7VRhxPkREfMJvE5GP8/pgX/n+5ChtaOcr/1jB6pJGvnT6UL5+zkjiog+zvKqlCrpaIL3g0L12Vj8Oz3/FFYL2lZgFg2a7x297y/XryR4DhS/ANQ/BuCtD9rxE+hqv53wREQktFXjkpOjqDrCtuoUN5U0UljXx1IoS2rv8/OdZw7n19GHE+Fth5d/dNuuNxZA5Cub8F0y8FqJiwx2+iGd4fbCvfH/ydPj8/OilQh5buosBafF856LRXDwhz/XmOVY1W10O361hJ+xcDLsWuSL+6Xe6bdqthYcugqqN8MV3IHO4O1b2EfjaYeDM/fv++LvdudILVPwX2YfXc76IiISWCjwSFtXNndz1ciEvrS5jZE4SP7tqIlMHpYPfB+ufgw/+AJVrITnPbbM+7WaISwl32CI9ntcH+8r3J9+ibTXc9VIhGyuaOWVIOp+ZOYizRuWQmuD641hrKWvsIDEmkrSEmGP/RdbuP/unoRjuPRVSBrh+bCv/DlXr3W3xGTDqIsgZ63r+7HgPOhshbbBr0D/5Rtf7R6SP83rOFxGR0FKBR8LqrY2V/M9z6yhv6mB8/1TmDOvHnOGZnDI4jYTid+H937ktfWNT3aB+4Az3DW5GAcQkhjt8kR7H64N95fvw8AcsTywr5g9vbqGiqYPICMMpQ9IJBGBDRRPNHd0kxETyn2cN55Z5BcRGhWi3rC0L4NGr3c8DprnduOLTYcPLrl9PZyOkDoJhZ0D2ONj4svtMiIiCyZ+BM/9nb6HH1w7LHoTyVXDOjyB1QGhiFOnBvJ7zRUQktFTgkbBr6ezm4UVFvLu5mo921ePzW6IjDVMGpXPaiEzOSytjxJYHMRteBPZ5vyblut4O0fHBSwLEpcKAqa4QlDNeO7RIn+P1wb7yfXgFApY1pY0sKKzg7Y3VxMdEMjo3mdG5ySzcUsOCwkoG90vgm+eNYmZBBlnJsce3pAtgx0I3Yyd3/P7Hu7ugtRpS+u8/86dmS7B3298gMgZO/borCi38FTSXu+JPbApceS+MPG//c+5eBrbmCTdjdNL1rrn/8T4HkTDxes4XEZHQUoFHepS2rm6WF9XzwbYaPthaw7rSJgDSEqI5pyCO64b7mZpUT0T9dqjbAe31bhtfX7v7t7XaDfABouL
dN8L9hkFipisGJWZBci5kjXbHRHoZrw/2le97toWbq/nRS+vZVt0KQEZiDOP6p/D5eQWcMTLr+Is9R6N2Gyz4vpvVA65vz1nfc0t7n7rZLfOd+WXIGQedzdBWCxtfgeoNEBUHJsJ9bmSOcsuAp3/+k7d4P5SORuhoCn4WtUL1ZjeTqHw1xKXBjC/A0DNUSJKQ83rOFxGR0FKBR3q0mpZOPthaw3tbanh7YxW1rV0MykjghpmDuGb6QDISD9IPorEEipe6S8lSd7215sCdXRIy3U4u2WMgbxL0n+IG+vs29hTxGK8P9pXvez6fP8Dyono2VjSxsbyZRdtrKK5r59QRmXz3ojGMzk2mpbOb2pYu0hKij69vz5EoWQ7dHTB47t4Ciq8dXv+u26FxXwOmw5QbYNynXIFn/XPw0T/cZ0XaIDj3Lrf9++7z+LshIvLghRlrYdeHsOiPsOlV9pthCq6IlDPeNYdurYbsse73djZCc4UrCA0/ByZcDQkZR/ecW2vgo7+7WUgpA9xytPQCSB0Y2ibUbXWw6p8w9HTInRC680rIeD3ni4hIaKnAI57R1R3gtfUV/GPxTpYW1RETFcFpIzI5ZUgG04dkMH5AyqH7QgQC0NHgBsWNxVC9CaoKoXojVG1w2/eCW+aVO9ENZmd8UbN8xHO8PthXvveeru4A//hwJ79/cwtNHT5iIiPo7A4AEB1puGB8HjfMHMTMgoyTO8MHXCNnLMQmQ0zyoQv42991BaHKda7gHxUPDbvcjNCkbMg/xV2Ssl2xprXa7Q5WutwtL5s2HzKGueXCUXGuT9zuLwy6O2Ht026XyMp17vbkXLeUrHarW2Y2+hIYfxUMO3Nvf7mA380Aqt0KSTmukANuedrKR6C7/cDnEZMEWaMgawxkjnBb0qfkQWWhK2JVrIUJ18KsWw//ujVXwuI/uQJZV4s776f/4eKTHsXrOV9EREJLBR7xpE0VzfxzyU4WbqlhR41bKhAbFcH0IencdsZw5gw/isJMIAB126B0pevNULoCSpa5gfopt8Ccr7pBvYgHeH2wr3zvXY1tPh5aVERbVzf9kmLolxjLurJGnllRQlNHN6NykvnhZeOYPaxfuEM9uIDfzeZZdr9bVpU2yPX/aSh2xZG67XvvGxUHGUPdsq7JN0BMwief39q9xZLdha7yNe53rnnCfQkRGQsFp7nPnx0L3bGPi4iCidfB3NtdjM1l0FjqPseqNrgvL6o2QmvV/o+LTXXPp3oDnPYtOPO/D5yZ1FAMi/7gCkj+LjfjaOpn4bXvQM1m+NR9MP5T+z+nUBTtrHUzsWKTXYFKS9mOmNdzvoiIhJYKPOJ51c2drNhZz/KiOl5dW05ZYwenj8zi2xeOZkzeMW6tXr0Z3vsVrH3Kfbs67WYYcS7kTNh/a15fuysGFX3gtvftPwWGngl5kz95mnx3F2z+F0QnwpB5x977QWQfXh/sK9/3Pu1dfl5eU8Yf39rKrro2PjV1AP990RhioyPZVNHM5spmYqMiGJAWz4D0ePJS44mM6IF/4LfWuuVVidlulk0oixB+n9sOfvNr7uLvhqGnuc+TnHFuxlBTmev1M/piSM3/5HO2N7jZP40lru9c5kjAwku3u+VdM78MF/yfO3fZKih8AdY8DhiY9GmYd4frYbf7XI9d55akjbrQLTGrL3Jxj7kUJl7rClMRwVm01rrlXU2lLu6U/pA38cAYA373e9//LVSscccSMt1nYs44V2iLS4W4FFdUi06AqBj3+nR3uCJU3mRI/FjR0N/tnldSTmiXrPVAXs/5IiISWirwSK/S4fPzyOIi/vz2Npo6fFw5ZQBj81IoqW+npL6N4rp2YqMj+N8rJzB+QOonn7B2G7z3G1j92N4ePolZbuDp97nijr/L9XJIzXdT+sFN2R96hrsUnOam6+/W2eK+HV38Jzf4BbccYOjprog08oIjG7yLHITXB/vK971Xh8/Pn97ayr0Lt2GMoSu4jOvjhvRL4H+
vnHB0MzHlyFnrlqN9eLcroOyeJRQVB1Pnw9yvHvwzyNcOL37Vfe6lD3GX7k7X5LqzyX3uRca4xtVdrQf2vRs8D+b8pytalSyFrW/ChhfdzKh+w91sWRMBRe9D0Xt7Px8/SVSc62M040uu8PbRP9xndnN5cKbVMMgeDbNug/xpR/96tVS55xyb7Ja+JWUd/TnAve6lK2H9s26mVUaBK7pljnS7f8YdwZjkILye80VEJLRU4JFeqbHNx93vbOVvi4ro6g6QFBtFfno8+ekJrCttpL6ti59eOYGrpx1hIaW9HirWuf4JlevczyYChsx1g9ZBsyA+zQ0Et78D295yl5ZK9/i0Qa7Qk9DPFXfa693j5t7uvgXe8gZsft014zSRrp/D6d/ef7aQyBHw+mBf+b7321rVzCOLd5KdHMvo3BRG5SbT5Q9QWt/Orro27n9vOztr27h2ej7/fdFYUhOi93v88qI6fvrqBk4dnsnXzhlJRE+c7dPTWet6+ZSudDNr8ia7f2OTj/5cvnb3+bV1gftcjE50S9YSs93MneQ8V9BZfDc0lbjPOOt3S80GzYZT/sPNAor4WA+97k7XiLqj0c2c8nW4vkPdna6QFBXrnsf652D143t7EpkIGH6u6xfUWOJmMZUsc7uoTb4Bzv6+64H0cQG/68tXt91daja7GUu1W/e/X0K/4GzdM9wle9z+s4QCftj6b7fsrr0BIqPdc61Y474Eioh2M6oadrrCGMBNz8Gws47+tcf7OV9EREJLBR7p1RrbfASsJS0hek9zz5qWTv7rnx+xeHstN8wcxPcvHbunOXNlUwcrdtaTEhfNvBHH+e2xta6Z846FULQQdrznvikddTHM+xoMnHHg/Wu2uEH3ir+5Xgxz/st943m4QXfA7xpHJ/d3U9ePRXena+ZZs9l9i5iQ6QaxiZkQn65+CB7i9cG+8r10+Pz87t9buP+97STFRnHxxDwun9Sf0bkp/OL1jTy6ZBfJcVE0d3Rz0YRcfn3NZOJjDtFgX3oOvw/WP++2jx88x33pcSwFpYNpr4dVj7kZtROvdYWlfXU2w8JfuVlLu5ddT74Bcsa65Vzrnob3fu0+A3dL6Ocaaw+eAwNnuZlJu3scFS+Fmk3ufnGprqF25kj3c+HzbvZRYhakDXYx+X2QNtDt0Db6YveFkLXuS6Gaza64phk8IiISAirwSJ/U7Q/wqzc285d3tzExP5WCzESWF9VT2rB3V5JvnT+K284cHrpfGvC7byKPZDvc2m3w5o9cb4LELDcQThngtsBNHeDOVbrCXcpWQVezW+aVP93NJho4y035PtTvaqlyDS2Ll7hL6Urwdx78vpExkJTrdmJJznWD2OmfP3AALT2C1wf7yvey27rSRu5buJ0FhZW0+/xERhistXx+bgFfP3ckjy3dxU9f3cDEAanc/9npZKccuo9Ztz/Av9ZVMHVwOgPS4k/is5AepXYbvHmXW1YW6HYzlzoaoX6H29J+1pfdEuz0AleEOZymMrf7WvESN8unZoubtTv8bLfUbdSFbvbOCeb1nC8iIqGlAo/0aa+tK+fOZ9bu2YFr2uAMpg5K4+FFRTy/qoxbTx/GnReMOvlb++5Wstx9q1i1wX0j6O/ae1tENOSOhwHTIHsM1GyFXYvdNrj79gvavcY/pT9UrndFocbivefImxQsCs10A9uuFjeVvbUW2mpcM83mCtfPoLncDZBNBEz+jFtitrsJp/QIXh/sK9/Lx7V1dbOgsJIVO+u5elo+E/P3/uG9oLCS2x//iKgIwxdOHcrNc4eQHLf/H9UfbK3hRy+tZ3NlC2PyUnjhtrnERPXuxrvyCVpr3Nb1ax53X2LMvR1GXnj8DZn93RAZFZoYj5DXc76IiISWCjzS5wUCFmPYr4gTCFi+98I6Hl2yi8/MHMSPLx//ibu61LV20e7z4/dbugMB/AFLbmrcAX9sHEegruDSWOKmdueMO/jOW50tULrcFXqqN7np39Wb3PKw1EG
uyeSA6W62T94ktx3v0agvgkV/hJV/h4APxl3pmmam5u+dYXS05+wL2utdX6amMhhxntsK+ATw+mBf+V6O1pbKZn7x+iYWFFaSlhDN1VPziY6KoKs7wLbqFt7ZVE1+ejyXTerP3e9s47Yzh/Gt80eHO2yRkPB6zhcRkdBSgUfkEKy1/OL1TdzzzjYum9SfO84dycCMhP0KPVVNHby4uowXVpWxtrTxgHNERRhmDs3g7NE5nDMmh0H9Ek7mU9jLWtcIMyaEv7+5Ej78Myz7q1sitq/EbMgY6mb3ZAx1TabbG9zMocYSd4mMCTb3nOQu/Ua4hpMtlcHZQpVuNpENuKn0ge7gYyZD/8nHX0QKBFwPhZ0fuKbZg+e6Rp+h2q4+EIDKta7Z5pYFbhq/3WfXoOyxrh/DuCtCWuzx+mBf+V6O1ZqSBn67YDPvbK4mKsIQExlBYmwU8+cM4ZZ5BcRFR/Ktp1bzzMoSnv7yHKYOSscfsPxz6S4+3F7LpRP7c86YbKIiNbtHvMPrOV9EREJLBR6RT3D3O1v5xWuumWJcdAQjc5IZmZNMZVMHH2ytIWBh/IAULpqQR2ZiLJERhqhIgzGG9WWNvLWhii1VLQCMyE7i/HG5XDA+l3H9U8K39CuU/D43K2V34aax2M3yqdsBddtcsWa3yFg3wyc13xWcKtbt3fnkaEREQe6EvQ0wh5zqGkJ/ku4ut+PKhhdh5yJor3PHo+JdHHFpMOk61z8hZ+zRxWStW7624x3XWHvHe3vPnzfZzdoZca5bKrfxFddsdNdiwEL+DJj5JRh7+XH3bPD6YF/5Xo6XtfaQubW5w8cFv3uPmKgIfn3tJO56qZBVxQ0kxUbR0tnNgLR4PjNzEKePzGJUbjLRhyj2WGvx+a2WeknYeT3ni4hIaKnAI3IENpQ3sbakkY0VzWyubGZjRTMJMZFcNqk/V0zpz/Dsw+8EsrO2lTc3VLGgsJKlRXX4A5YBafGcPy6XmUMzGJWTfMDsoF6jq9UVfuIzXBFm3z+8An7XmLJ8NdRucTt2JeW4bXWTc90OKxFRrudPRCR0tUHZSreDScky1xza1+rOlTPBNaMeMtf1JEobsrefQns9rHgIltzrCk6pwW3rB892BaK0IW6nsxUPu+ab/i633CwxuJvY7tjTBrsZSRkFblZSS1WwmBO8NJe535eSD0NPh4LT3Ta6h9ruvqkc1j8Lyx5w2/Im94dTboFpn4PEfsf0cnt9sK98Lyfa4m21XH//hwBkJMbwvUvGcOnE/ry5sYqHFxWxaFst4Ar6E/PTmFmQwbzhmUwZlE5LZzfPrizhsaW72FXXxnnjcrlhxiBmD+vXOwr24jlez/kiIhJaKvCInGT1rV38e0Mlr6+vYOGWGrq63bKduOgIhmcnMTo3hSmD0pgyMJ2ROUlaLnA4/m4o+2jvrJldS/buBhYVD5kjXCFm29uuEDT0DJj9nzDs7EM302ytdY03y1dDW51rON1WC63VbpvcPQwQzJ8J/YLFpVPd78gYenRbywcCsOUNWHIPbH/HFZS+sRGiYo/u9cD7g33lezkZHnhvO7vq2vj6OSNJT4zZ77bShnZW7qxn5a56Vu6sZ21pIwELCTGRdPstXf4AUwalMTYvhZfXlNPY7mNgRjwFmUmkxUeTlhDNmLwU5g7LDN+yXOkzvJ7zRUQktFTgEQmj9i4/myub2VTZzOYK9+/6sibqWt1uWQkxkUzKT+Oyyf25csoA4qIjwxxxD+drDzaX3ugaS1dtcDNjBs2G2be5XceOlbVu55W67W5L3bodbhvdgtMga8zx776yW9VG9xwmXnNMD/f6YF/5XnqaxnYfH26v5YOtNcRERnDN9IGMynWzNjt8fl5dW86ra8upbumisa2L2tYumju6AchPj2fa4HSGZiYxNCuRgRkJJMVGkRQbRUp8FAkxB+64tKmime5AgHH9U0/q8xRv8nrOFxGR0FKBR6SHsdZSXNfOyl31fLSrnsXba9lc2UJ
mUiw3zxnMjbMGk5YQc9jHb6psJjoygmFZSScxcukJvD7YV74Xr7PWsq26lUXbavhgaw3rSpsoa2zn48OtCANfOn0Yd5w7ck+vn2dXlvDtZ9ZiDPz9lpnMKMgIwzMQL/F6zhcRkdBSgUekh7PWsnhbLfcu3M67m6uJj47kwgm5jM1LYUROMiOyk8hOjmXFznreKKzkjcIKiutc4+KzRmfzlTOGMX2I/kjoK7w+2Fe+l96ow+dnR00rpfXttHZ109blZ1lRHc+uLGXKoDR+9+nJPLGsmLvf2casoRlUNXdS3dTJY1+cxfgBe2fytHR2kxAdScTH+rX5/G6p76GaQh+NhrYuHlm8k0+fMpCclBDtKignjNdzvoiIhJYKPCIesrGiifsX7uDtTVV7lnEBREYY/AFLTGQEc4f349yxudS2dPK3RUXUtXYxY0gGt5xaQL/EGLr8Abr9lu5AgLF5qeSmHnwA39Th47cLNjOzoB8XjM89WU9RjpPXB/vK99KXvLymjO88u5a2Lj/+gOX6GQO56/LxVDd3cvU9i+jsDvDA/OmsL2vi+Y9KWb6znuhIQ15qPP3T4mj3BShvaKe6pRMD9E+LZ1BGAiNzkrlyygAm5qdijCEQsLy1sYqnVhQzJDORm+cMIS81/oB4als6ufHBpWwobyI3JY4H5k/fr8AkPY/Xc76IiISWCjwiHlXb0snWqha2VLVQXNfGxPw0Th+VRVLs3p4ObV3dPL60mPvf2055Y8cB50iIieSb541i/pwh++3gtaq4gf96bCXFde1ERhj+eP0ULpqQd1Kelxwfrw/2le+lrymua+PHLxcyd3gmn509eM9uXNurW7jmL4upDRbzR+YkccH4PLq6A5Q2tFPW0E5CTCR5qXHkpsYTCFiK69sormujsLyJDl+A0bnJnDk6m3+tLaeoto3MpBjqWruIMIZLJuZxw6zBTB2UTmSEoaalkxvuX0JRbSv/ffEY/vLONurbfPzuusmcOiKTVcUNfLSrgfSEGK6aNoDYKPWE6wm8nvNFRCS0VOAR6QO6ugMsC27PHh0ZQXSkIWDh7ne28s6maiYPTOPnV01kRHYSD7y/nV+8tomclDh+dtUEfv/vLawqbuDuG6Zy3rjQz+Qpqmnl3oXbWFBYyS+vnsSZo7ND/jv6Eq8P9pXvRfbaVNHMK2vLOX9cDmPzUo54K/amDh8vrS7jiWXFrClpZMqgNG6ZV8D543KpaOzgoUVFPL50F61dftISojltRBaF5U2U1Lfx4PxTmDs8k6rmDr7wyArWlDQQYdws0d0GpMVz+9kj+NTUAdrpMcy8nvNFRCS0VOAR6cOstby4uowfvVRIc4eP0bkprC1t5IJxufz8qomkJkTT3OHjpgeXsr6skftumr6nAOMPWJYV1fHelmrG9U/l3LE5R9X/YUN5E/e8s42X15QRFRlBVlIsta2d/P2WmZyinkHHzOuDfeV7kdBqbPORmhB9wPHmDh/vbq7m7Y3VvLu5is7uAPd/djqzhvbbc58On58/vbUVgGmD05k6KJ21pY388vWNrC5pJDMplgFpcWQkxpCbGs8t8woYnq3m/ieT13O+iIiElgo8IkJdaxc/frmQ19ZV8N2Lx3DjzEH7fVPc2O7jhgc+ZHNlC/990Rg2VTbzxvoKalr29gHKTo7luhmDuH7GwIP2dgAIBCzvbqnm4UVFvLOpmsSYSG6cNZhbTi0g0hiuuXcx1c2dPPHF2Yztn3LCn3dv5PXBvvK9yMkXCFj81h5xkd5ayxuFlby2roLa1i7qWjvZUd2KL2C549yR/Me8guOa2VPa0E6nz8+QfokHNJT+eBxHOqupt/J6zhcRkdBSgUdE9vAH7H69ePbV0NbF9fcvYUN5EwkxkZw5OpsLxuVy+qgslu2o4+8f7uTdzdVEGMOcYf0Y2z+FUTnJjMpNJjs5jhdXl/H3xUXBPhCx3DRrMPPn7L/le2lDO1ffswif3/L0rbMZkpl4kp557+H1wb7
yvYg3VTV38P3n1/Pa+gom5acydXA6FY0dlDd2EGFgSGYiBf0SGdQvgX6JsWQkxpCaEE1Tu4+61i6qmjtYsbOeD7bWsqOmFXB94sbkpTAyJ4mMxBjSE2KIjY5kU0UTa0sa2VDRzJi8FD43ZwgXTcgjJqrvLRfzes4XEZHQUoFHRI5YY7uPVcUNzCzIIC76wAabu2rbeHTpTt7dVM226hZ8/v3zy9RBacyfM4QLxx96IL61qoVr711MQkwkD8yfTkFmopp5HgWvD/aV70W8y1rLK2vL+eGLhbR1dZOXGkf/tHh8/gBFNW1UNB3Y7H9fCTGRzBraj3nDM0mKi6KwrIn1ZY1sr26lod23pw9QcmwU4wekMio3mYVbqtle3UpW8u4vDoaQGn/gkrSPCwQsz68qpd3n5+pp+Z79nPF6zhcRkdBSgUdETgifP8DO2lY2VjSzq66NU4dnMSH/yLbbXVPSwPX3fUhrlx+AzKRY+qfFud1iUuLITokjJyWOnJRY0hNiSI2PJiUumqS4qEPOQOorvD7YV74X8b7dY8uPL59q6+qmtL49uKyri6Z2Hynx0fRLjKFfUgyDMhIPWfwPBCzNnd20dXWTkxy3Z+lWIGB5b2sND32wg7c3VZMUG8VnZw/m8/MKyEyKPei5impaufOZNSzZUQe4ptHfPH8kl08asOe8Xln+5fWcLyIioaUCj4j0SEU1rSwtqqO8oYPyxnbKGjsob2inqrmTxnbfIR+XlhBN/9R48tPjGZAeT356AqePzGR4dvJJjD58vD7YV74XkWNVWNbEn9/Zyqtry7HWFW5G5iQxNCuJhJhIoiIiaO3q5pHFRURHRvA/F49hQFoCP3ttA+tKm4IFIUtzRzc+f4AxeSmcMiR+3QTrAAAN6UlEQVSDU4ZkMDInif5p8STGRtHtD7Cpspm1JY10ByyfPmXgUW0yEEpez/kiIhJaKvCIiOd0+PxUNnVQ2dRJQ1sXTR3dNLX7aOrwUdvSRWlDOyX1bZTUt9MWnAU0eWAaV0/L59JJ/UmNj6apw8fq4gZW7mygsLyRmKhI0hOiSU+IIT0hmtzUeEbkJDE4I+GomoUGApat1S2s2tXAmtIGUuKimZifxsT8VPJS4074N8JeH+wr34vI8dpa1cJr68rZXNnC5spmimpb6eoOsHun93PH5vCTK8aTkxIHuLz98tpy3tlYRXxMJEmxURhjWFPSwEe7Gmj3+fecOy0hmvYuP53dgT3HZgzJ4E83TCE7Oe6AWAIBy9MrSihpaGfW0AymDko/6BLnY+X1nC8iIqGlAo+I9FrWWiqaOnhlTTlPLS9hU2UzMVERDEyPZ3tNK9aCMVCQmUggYKlv8x0wOygmMoKhWYmMyk1mZkE/Th2RycCMhD23BwKWwvIm3t1czeJttawubqC5sxtwfSLafX66g39VZCbFMCYvhfz0BPLT4+mfFseAtASGZiUecinB0fL6YF/5XkROlKPdLQzccuMN5U3sqGmlrKGD0oY2YqMimZifysT8NFYXN/CdZ9eSHBfFn2+YyilDMvY8dldtG996evWepWAAMVERjMhOoqs7QFuXn7aubu69aTozCjIO9us/kddzvoiIhJYKPCLSJ1hrWVfaxNMriimpb2difhpTB6cxaWAaKXF7G3L6A5aGNjcLaHNlC1uqmtlS2cL6skYqmzoBGNwvgXnDM2nv8rNwS/We7eLH5KUwbXAakwemM3lgGkMzE+nyB9hY0cyakgbWlDSyqaKZsgbXg2JfGYkxjMhOYmROMiNzkw/Yqv5IeX2wr3wvIl6zsaKJW/++gp11bRRkJjIqJ5mclDieXF5MpDF875KxXDAhl2U76li0rZbt1S3Ex0SSEBNFQkwkn509hOHZScf0u72e80VEJLRU4BEROQLWWrZVt/Lelmre31LDh9triYmK4LSRWZw+MotTR2SRlXzks3Dau/yUNbZTUt/O1qoWtlQ2s7nSFZNS4qP54NtnHVOcXh/sK9+LiBc
1dfh46IMi1pc1srmyhZ21rZw2Mov/vXIC/dPiT9jv9XrOFxGR0IoKdwAiIl5gjGF4dhLDs5P43NwC/AGLgT07rhyt+JhIhmUlMSwridNHZu05bq09bBPpnsgYMxB4BMgBLHCftfb3xpgM4AlgCFAEXGutrQ9XnCIiJ0pKXDRfPXvEnuv+gO3zuzqKiMjJF56W/yIiHhcZYY65uHM4xhjSEmJCft4TrBv4hrV2LDALuM0YMxb4NvCmtXYE8GbwuohIr6fijoiIhIMKPCIiclysteXW2pXBn5uBDcAA4HLg4eDdHgauCE+EIiIiIiK9nwo8IiISMsaYIcAUYAmQY60tD95UgVvCdbDHfNEYs9wYs7y6uvqkxCkiIiIi0tuowCMiIiFhjEkCngG+Zq1t2vc26zr6H7Srv7X2PmvtdGvt9KysrIPdRUREREREPoEKPCIictyMMdG44s6j1tpng4crjTF5wdvzgKpwxSciIiIi0tupwCMiIsfFGGOAB4EN1trf7HPTi8D84M/zgRdOdmwiIiIiIn2FtkkXEZHjNRe4CVhrjFkVPPZd4GfAk8aYW4CdwLVhik9EREREpNdTgUdERI6LtfZ94FB7Ap99MmMREREREemrtERLRERERERERMTjVOAREREREREREfE4FXhERERERERERDxOBR4REREREREREY8z1tpwx/CJjDHVuB1YjkUmUBPCcE40r8UL3ovZa/GC92L2WrzgvZgPFe9ga23WyQ4mVPpYvj8R9BroNQC9BtB3XgNP53wREQktTxR4jocxZrm1dnq44zhSXosXvBez1+IF78XstXjBezF7Ld6TQa+JXgPQawB6DUCvgYiI9E1aoiUiIiIiIiIi4nEq8IiIiIiIiIiIeFxfKPDcF+4AjpLX4gXvxey1eMF7MXstXvBezF6L92TQa6LXAPQagF4D0GsgIiJ9UK/vwSMiIiIiIiIi0tv1hRk8IiIiIiIiIiK9mgo8IiIiIiIiIiIe12sLPMaYC4wxm4wxW40x3w53PAdjjPmrMabKGLNun2MZxpgFxpgtwX/TwxnjvowxA40xbxtjCo0x640xtweP9+SY44wxS40xq4Mx/yh4vMAYsyT4/njCGBMT7lj3ZYyJNMZ8ZIx5OXi9p8dbZIxZa4xZZYxZHjzWk98XacaYp40xG40xG4wxs3t4vKOCr+3uS5Mx5ms9OeaTzQs5P9S8mJNPBK/lyxPBazntRDDGfD34/2CdMeax4Od/n3sviIhI39YrCzzGmEjgz8CFwFjgemPM2PBGdVAPARd87Ni3gTettSOAN4PXe4pu4BvW2rHALOC24Ovak2PuBM6y1k4CJgMXGGNmAT8HfmutHQ7UA7eEMcaDuR3YsM/1nh4vwJnW2snW2unB6z35ffF74DVr7WhgEu617rHxWms3BV/bycA0oA14jh4c88nkoZwfal7MySeCF/NlqHkqp4WaMWYA8FVgurV2PBAJXEfffC+IiEgf1isLPMAMYKu1dru1tgt4HLg8zDEdwFq7EKj72OHLgYeDPz8MXHFSgzoMa225tXZl8Odm3AByAD07ZmutbQlejQ5eLHAW8HTweI+K2RiTD1wMPBC8bujB8R5Gj3xfGGNSgdOABwGstV3W2gZ6aLwHcTawzVq7E+/EfKJ5IueHmhdzcqj1onx5zHpBTguVKCDeGBMFJADl9LH3goiISG8t8AwAive5XhI85gU51try4M8VQE44gzkUY8wQYAqwhB4ec3D6/iqgClgAbAMarLXdwbv0tPfH74D/BwSC1/vRs+MFVzR7wxizwhjzxeCxnvq+KACqgb8Fl3U8YIxJpOfG+3HXAY8Ff/ZKzCeal3N+SHgpJ4eYF/NlqHk9px03a20p8CtgF66w0wisoO+9F0REpI/rrQWeXsG6Pex73D72xpgk4Bnga9bapn1v64kxW2v9waUt+bhv+keHOaRDMsZcAlRZa1eEO5ajNM9aOxW3ROY2Y8xp+97Yw94XUcB
U4B5r7RSglY8tXehh8e4R7B9xGfDUx2/rqTHLiee1nBwqHs6XoebZnBYqwf5Cl+OKXf2BRA5cAi8iItLr9dYCTykwcJ/r+cFjXlBpjMkDCP5bFeZ49mOMicb9IfGotfbZ4OEeHfNuwSnrbwOzgbTgNG7oWe+PucBlxpgi3DKTs3C9FXpqvMCeb0+x1lbhesPMoOe+L0qAEmvtkuD1p3F/HPXUePd1IbDSWlsZvO6FmE8GL+f84+LlnBwCnsyXJ4CXc1qonAPssNZWW2t9wLO490dfey+IiEgf11sLPMuAEcHdE2JwSxpeDHNMR+pFYH7w5/nAC2GMZT/B3gYPAhustb/Z56aeHHOWMSYt+HM8cC6uT8XbwNXBu/WYmK2137HW5ltrh+Det29Za2+gh8YLYIxJNMYk7/4ZOA9YRw99X1hrK4BiY8yo4KGzgUJ6aLwfcz17l2eBN2I+Gbyc84+ZF3NyKHkxX54IHs9pobILmGWMSQj+v9j9GvSp94KIiIhxs3Z7H2PMRbi1+ZHAX621Pw1zSAcwxjwGnAFkApXAD4DngSeBQcBO4Fpr7ccbMYeFMWYe8B6wlr39Dr6L6/nQU2OeiGusGIkraD5prb3LGDMU941vBvARcKO1tjN8kR7IGHMG8E1r7SU9Od5gbM8Fr0YB/7TW/tQY04+e+76YjGvKGgNsBz5H8P1BD4wX9hTPdgFDrbWNwWM99jU+2byQ80PNizn5RPFKvjxRvJjTQs0Y8yPg07jd5T4C/gPXc6dPvRdERKRv67UFHhERERERERGRvqK3LtESEREREREREekzVOAREREREREREfE4FXhERERERERERDxOBR4REREREREREY9TgUdERERERERExONU4BE5QYwxZxhjXg53HCIicuIp54uIiEi4qcAjIiIiIiIiIuJxKvBIn2eMudEYs9QYs8oYc68xJtIY02KM+a0xZr0x5k1jTFbwvpONMR8aY9YYY54zxqQHjw83xvzbGLPaGLPSGDMsePokY8zTxpiNxphHjTEmbE9URESU80VERKTXUoFH+jRjzBjg08Bca+1kwA/cACQCy62144B3gR8EH/IIcKe1diKwdp/jjwJ/ttZOAuYA5cHjU4CvAWOBocDcE/6kRETkoJTzRUREpDeLCncAImF2NjANWBb8ojUeqAICwBPB+/wDeNYYkwqkWWvfDR5/GHjKGJMMDLDWPgdgre0ACJ5vqbW2JHh9FTAEeP/EPy0RETkI5XwRERHptVTgkb7OAA9ba7+z30Fjvvex+9ljPH/nPj/70f85EZFwUs4XERGRXktLtKSvexO42hiTDWCMyTDGDMb937g6eJ/PAO9baxuBemPMqcHjNwHvWmubgRJjzBXBc8QaYxJO6rMQEZEjoZwvIiIivZa+WZI+zVpbaIz5H+ANY0wE4ANuA1qBGcHbqnA9GwDmA38JDua3A58LHr8JuNcYc1fwHNecxKchIiJHQDlfREREejNj7bHOQhbpvYwxLdbapHDHISIiJ55yvoiIiPQGWqIlIiIiIiIiIuJxmsEjIiIiIiIiIuJxmsEjIiIiIiIiIuJxKvCIiIiIiIiIiHicCjwiIiIiIiIiIh6nAo+IiIiIiIiIiMepwCMiIiIiIiIi4nH/H3YqdEosrMW/AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "\n", + "import matplotlib\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import pickle\n", + "\n", + "plt.rcParams[\"figure.figsize\"] = [16, 10]\n", + "\n", + "for i in range(5):\n", + " with open(\"../history/fold-%d.history\" % i, \"rb\") as fin:\n", + " history = pickle.load(fin)\n", + " plt.subplot(2, 3, i + 1)\n", + " plt.plot(history['loss'])\n", + " plt.plot(history['val_loss'])\n", + " plt.title('%d' % (i + 1))\n", + " plt.ylabel('loss')\n", + " plt.xlabel('epoch')\n", + " plt.legend(['train', 'test'], loc='upper right')\n", + "plt.tight_layout()\n", + "plt.show()\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA7YAAAFNCAYAAAA0MPNrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHQ9JREFUeJzt3XuUZWV5J+DfKw3xOgLSQUKDjZEhYTJemNYhY2KMxAQjgsk4RpcmaFhhskKiTnQpEmcwF9do4njLGBO8YkJUxBtekoiIMVmJYCOKCDoQ5NII0kYRUQcE3vnj7M5UyqbrUN2nTu3q51mrVu3v23uf/Xavr/r0r7797VPdHQAAABire8y7AAAAANgZgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2ALAKlRVf1pV/33edQDAGAi2AOyWqqqr6iGL+l5aVX+xAtd+W1X9waK+jUNN65Kku3+9u39/ite6qqp+Zla1AsAYCLYAMEPbguoYjbl2AHYvgi0AbEdV7VdVH6qqm6rq61X1d1V1j2HfD1XVe6pqa1V9uaqes+C8l1bVWVX1F1V1c5JnLfP6/zKre1e1VNWfJzk4yQer6paqeuFw/LFV9YXh+E9U1Y8ueN0jquqiqvpWVb27qt614DqPraotVfWiqrohyVurap/h2lur6hvD9oYFr/eJqvqDqvqHoYYPVtUDquqMqrq5qj5dVRuX83cAANMSbAFg+56fZEuS9Un2T3JKkh7C7QeTfC7JgUmOSvK8qvq5Becel+SsJHsnOWNWtXT3Lye5JsmTuvu+3f2HVfVvk7wjyfOG4z+SSfDdq6r2SvK+JG9Lsu9w3C8sutYDh30PSnJiJv9XeOvQPjjJd5P870XnPC3JL2fy9/HDSf5xOGffJJclOXUX/B0AwF0SbAFg+76X5IAkD+ru73X333V3J3lkkvXd/XvdfVt3X5nkjZmEu23+sbvf3913dvd37+L1XzDMqN5UVTcluXgZtWzPLyX5cHef093fS/LKJPdK8p+SHJlkXZLXDa/z3iQXLDr/ziSndvet3f3d7v7n7n5Pd3+nu7+V5GVJfmrROW/t7n/q7m8m+ask/9TdH+vu25O8O8kjdvBnA4
CdJtgCsLu6I8mei/r2zCREJskfJbkiyUer6sqqOnnof1CSH1oUSk/JZCZ1m2unuP4ru3vvbV9JHrqDY++qlu35oSRXb2t0951DPQcO+65bFIoX17q1u//vtkZV3buq/qyqrh5urf5kkr2rao8F53x1wfZ3t9O+7w7qBYCdJtgCsLu6JsnGRX2HZAiF3f2t7n5+dz84ybFJfruqjsokCH55YSjt7vt1988veJ27mk1dlh3Usr1rfSWT8J0kqapKclCS65Jcn+TAoW+bgxZfblH7+UkOS/Ifu/vfJHnMtpde7p8HAHY1wRaA3dW7krykqjYMD2L6mSRPymRtbKrqmKp6yBACv5nJDO+dmdy6+63hAUv3qqo9qurHquqRsyp0B7Ukk9nRBy84/MwkT6yqo6pqz0yC6a1J/iGTta93JPnNqlpXVccledQSl79fJrOuN1XVvrFeFoBVSLAFYHf1e5mEvb9P8o0kf5jkGd19ybD/0CQfS3JLJoHwT7r7vO6+I8kxSR6e5MtJvpbkTUnuP8Nat1vLsO9/ZhLQb6qqF3T3l5I8M8kfD7U9KZOHS93W3bcl+cUkJyS5aTjuQ5kE37vymkzW6H4tyaeS/PWu/sMBwM6qu372BACw1lXV+Un+tLvfOu9aAGC5zNgCwG6kqn6qqh443Ip8fCYPrTILC8CorZt3AQDAijosk3W490lyZZKndPf18y0JAHaOW5EBAAAYNbciAwAAMGqCLQAAAKM26jW2++23X2/cuHHeZQAAADADF1544de6e/1Sx4062G7cuDGbN2+edxkAAADMQFVdPc1xbkUGAABg1ARbAAAARk2wBQAAYNRmFmyr6i1VdWNVXbKdfc+vqq6q/YZ2VdXrquqKqrq4qo6YVV0AAACsLbOcsX1bkqMXd1bVQUl+Nsk1C7qfkOTQ4evEJG+YYV0AAACsITMLtt39ySRf386uVyd5YZJe0Hdckrf3xKeS7F1VB8yqNgAAANaOFV1jW1XHJbmuuz+3aNeBSa5d0N4y9AEAAMAOrdjn2FbVvZOcksltyDvzOidmcrtyDj744F1QGQAAAGO2kjO2P5zkkCSfq6qrkmxI8pmqemCS65IctODYDUPf9+nu07p7U3dvWr9+/YxLBgAAYLVbsWDb3Z/v7h/s7o3dvTGT242P6O4bkpyd5FeGpyMfmeSb3X39StUGAADAeM3y437ekeQfkxxWVVuq6oQdHP6RJFcmuSLJG5P8xqzqAgAAYG2Z2Rrb7n76Evs3LtjuJCfNqpZ52njyh+ddwuhd9fInzrsE8LO8C/hZZrXw87zz/DwDq82KPhUZAAAAdrUVeyoyLJffrMPa4Gd51zBTBgDfz4wtAAAAoybYAgAAMGqCLQAAAKNmjS0AjIi1yqwGxiGrgWcOsJBgCwAAjI5fsOy8tfTLAbciAwAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjNrNgW1Vvqaobq+qSBX1/VFVfrKqLq+p9VbX3gn0vrqorqupLVfVzs6oLAACAtWWWM7ZvS3L0or5zkvxYdz80yf9J8uIkqarDkzwtyb8bzvmTqtpjhrUBAACwRsws2Hb3J5N8fVHfR7v79qH5qSQbhu3jkryzu2/t7i8nuSLJo2ZVGwAAAGvHPNfY/mqSvxq2D0xy7YJ9W4Y+AAAA2KG5BNuq+p0ktyc5YxnnnlhVm6tq89atW3d9cQAAAIzKigfbqnpWkmOSPKO7e+i+LslBCw7bMPR9n+4+rbs3dfem9evXz7RWAAAAVr8VDbZVdXSSFyY5tru/s2DX2UmeVlU/UFWHJDk0yQUrWRsAAADjtG5WL1xV70jy2CT7VdWWJKdm8h
TkH0hyTlUlyae6+9e7+wtVdWaSSzO5Rfmk7r5jVrUBAACwdsws2Hb307fT/eYdHP+yJC+bVT0AAACsTfN8KjIAAADsNMEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFGbWbCtqrdU1Y1VdcmCvn2r6pyqunz4vs/QX1X1uqq6oqourqojZlUXAAAAa8ssZ2zfluToRX0nJzm3uw9Ncu7QTpInJDl0+DoxyRtmWBcAAABryMyCbXd/MsnXF3Ufl+T0Yfv0JE9e0P/2nvhUkr2r6oBZ1QYAAMDasdJrbPfv7uuH7RuS7D9sH5jk2gXHbRn6AAAAYIfm9vCo7u4kfXfPq6oTq2pzVW3eunXrDCoDAABgTFY62H512y3Gw/cbh/7rkhy04LgNQ9/36e7TuntTd29av379TIsFAABg9VvpYHt2kuOH7eOTfGBB/68MT0c+Msk3F9yyDAAAAHdp3axeuKrekeSxSfarqi1JTk3y8iRnVtUJSa5O8tTh8I8k+fkkVyT5TpJnz6ouAAAA1paZBdvufvpd7DpqO8d2kpNmVQsAAABr19weHgUAAAC7gmALAADAqAm2AAAAjJpgCwAAwKgJtgAAAIyaYAsAAMCoCbYAAACMmmALAADAqAm2AAAAjJpgCwAAwKgJtgAAAIyaYAsAAMCoCbYAAACMmmALAADAqAm2AAAAjJpgCwAAwKgJtgAAAIyaYAsAAMCoCbYAAACMmmALAADAqAm2AAAAjJpgCwAAwKhNFWyr6t/PuhAAAABYjmlnbP+kqi6oqt+oqvvPtCIAAAC4G6YKtt39k0mekeSgJBdW1V9W1eNnWhkAAABMYeo1tt19eZKXJHlRkp9K8rqq+mJV/eKsigMAAIClTLvG9qFV9eoklyV5XJIndfePDtuvnmF9AAAAsEPrpjzuj5O8Kckp3f3dbZ3d/ZWqeslMKgMAAIApTBtsn5jku919R5JU1T2S3LO7v9Pdfz6z6gAAAGAJ066x/ViSey1o33voW5aq+m9V9YWquqSq3lFV96yqQ6rq/Kq6oqreVVV7Lff1AQAA2H1MG2zv2d23bGsM2/dezgWr6sAkz0myqbt/LMkeSZ6W5BVJXt3dD0nyjSQnLOf1AQAA2L1MG2y/XVVHbGtU1X9I8t0dHL+UdUnuVVXrMgnI12fyIKqzhv2nJ3nyTrw+AAAAu4lp19g+L8m7q+orSSrJA5P80nIu2N3XVdUrk1yTSTj+aJILk9zU3bcPh21JcuByXh8AAIDdy1TBtrs/XVU/kuSwoetL3f295VywqvZJclySQ5LclOTdSY6+G+efmOTEJDn44IOXUwIAAABryLQztknyyCQbh3OOqKp099uXcc2fSfLl7t6aJFX13iSPTrJ3Va0bZm03JLlueyd392lJTkuSTZs29TKuDwAAwBoyVbCtqj9P8sNJPpvkjqG7kywn2F6T5MiquncmtyIflWRzkvOSPCXJO5Mcn+QDy3htAAAAdjPTzthuSnJ4d+/0DGl3n19VZyX5TJLbk1yUyQzsh5O8s6r+YOh7885eCwAAgLVv2mB7SSYPjLp+V1y0u09Ncuqi7iuTPGpXvD4AAAC7j2mD7X5JLq2qC5Lcuq2zu4+dSVUAAAAwpWmD7UtnWQQAAAAs17Qf9/O3VfWgJId298eGBz/tMdvSAAAAYGn3mOagqvq1JGcl+bOh68Ak759VUQAAADCtqYJtkpMy+azZm5Okuy9P8oOzKgoAAACmNW2wvbW7b9vWqKp1mXyOLQAAAMzVtMH2b6vqlCT3qqrHJ3l3kg/OriwAAACYzr
TB9uQkW5N8Psl/TfKRJC+ZVVEAAAAwrWmfinxnkjcOXwAAALBqTBVsq+rL2c6a2u5+8C6vCAAAAO6GqYJtkk0Ltu+Z5L8k2XfXlwMAAAB3z1RrbLv7nxd8Xdfdr0nyxBnXBgAAAEua9lbkIxY075HJDO60s70AAAAwM9OG0/+1YPv2JFcleeourwYAAADupmmfivzTsy4EAAAAlmPaW5F/e0f7u/tVu6YcAAAAuHvuzlORH5nk7KH9pCQXJLl8FkUBAADAtKYNthuSHNHd30qSqnppkg939zNnVRgAAABMY6qP+0myf5LbFrRvG/oAAABgrqadsX17kguq6n1D+8lJTp9NSQAAADC9aZ+K/LKq+qskPzl0Pbu7L5pdWQAAADCdaW9FTpJ7J7m5u1+bZEtVHTKjmgAAAGBqUwXbqjo1yYuSvHjo2jPJX8yqKAAAAJjWtDO2v5Dk2CTfTpLu/kqS+82qKAAAAJjWtMH2tu7uJJ0kVXWf2ZUEAAAA05s22J5ZVX+WZO+q+rUkH0vyxtmVBQAAANOZ9qnIr6yqxye5OclhSf5Hd58z08oAAABgCksG26raI8nHuvunkwizAAAArCpL3orc3XckubOq7r+rLlpVe1fVWVX1xaq6rKp+vKr2rapzqury4fs+u+p6AAAArF1T3Yqc5JYkn6+qczI8GTlJuvs5y7zua5P8dXc/par2yuQzck9Jcm53v7yqTk5yciYfMQQAAAB3adpg+97ha6cNM7+PSfKsJOnu25LcVlXHJXnscNjpST4RwRYAAIAl7DDYVtXB3X1Nd5++C695SJKtSd5aVQ9LcmGS5ybZv7uvH465Icn+u/CaAAAArFFLrbF9/7aNqnrPLrrmuiRHJHlDdz8ik1ubT154wMLPzF2sqk6sqs1VtXnr1q27qCQAAADGaqlgWwu2H7yLrrklyZbuPn9on5VJ0P1qVR2QJMP3G7d3cnef1t2bunvT+vXrd1FJAAAAjNVSwbbvYnvZuvuGJNdW1WFD11FJLk1ydpLjh77jk3xgV1wPAACAtW2ph0c9rKpuzmTm9l7DdoZ2d/e/WeZ1fyvJGcMTka9M8uxMQvaZVXVCkquTPHWZrw0AAMBuZIfBtrv3mMVFu/uzSTZtZ9dRs7geAAAAa9dStyIDAADAqibYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjNrdgW1V7VNVFVfWhoX1IVZ1fVVdU1buqaq951QYAAMB4zHPG9rlJLlvQfkWSV3f3Q5J8I8kJc6kKAACAUZlLsK2qDUmemORNQ7uSPC7JWcMhpyd58jxqAwAAYFzmNWP7miQvTHLn0H5Akpu6+/ahvSXJgfMoDAAAgHFZ8WBbVcckubG7L1zm+SdW1eaq2rx169ZdXB0AAABjM48Z20cnObaqrkryzkxuQX5tkr2rat1wzIYk123v5O4+rbs3dfem9evXr0S9AAAArGIrHmy7+8XdvaG7NyZ5WpKPd/czkpyX5CnDYccn+cBK1wYAAMD4rKbPsX1Rkt+uqisyWXP75jnXAwAAwAisW/qQ2enuTyT5xLB9ZZJHzbMeAAAAxmc1zdgCAADA3SbYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAA
AAoybYAgAAMGqCLQAAAKMm2AIAADBqKx5sq+qgqjqvqi6tqi9U1XOH/n2r6pyqunz4vs9K1wYAAMD4zGPG9vYkz+/uw5McmeSkqjo8yclJzu3uQ5OcO7QBAABgh1Y82Hb39d39mWH7W0kuS3JgkuOSnD4cdnqSJ690bQAAAIzPXNfYVtXGJI9Icn6S/bv7+mHXDUn2n1NZAAAAjMjcgm1V3TfJe5I8r7tvXrivuztJ38V5J1bV5qravHXr1hWoFAAAgNVsLsG2qvbMJNSe0d3vHbq/WlUHDPsPSHLj9s7t7tO6e1N3b1q/fv3KFAwAAMCqNY+nIleSNye5rLtftWDX2UmOH7aPT/KBla4NAACA8Vk3h2s+OskvJ/l8VX126DslycuTnFlVJyS5OslT51AbAAAAI7Piwba7/z5J3cXuo1ayFgAAAMZvrk9FBgAAgJ0l2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAo7bqgm1VHV1VX6qqK6rq5HnXAwAAwOq2qoJtVe2R5PVJnpDk8CRPr6rD51sVAAAAq9mqCrZJHpXkiu6+srtvS/LOJMfNuSYAAABWsdUWbA9Mcu2C9pahDwAAALZr3bwLuLuq6sQkJw7NW6rqSyt06f2SfG2FrsV4GScsxRhhGsYJSzFGmIZxwg7VK0YxRh40zUGrLdhel+SgBe0NQ9+/6O7Tkpy2kkUlSVVt7u5NK31dxsU4YSnGCNMwTliKMcI0jBOWspbGyGq7FfnTSQ6tqkOqaq8kT0ty9pxrAgAAYBVbVTO23X17Vf1mkr9JskeSt3T3F+ZcFgAAAKvYqgq2SdLdH0nykXnXsR0rfvszo2ScsBRjhGkYJyzFGGEaxglLWTNjpLp73jUAAADAsq22NbYAAABwtwi2U6iqo6vqS1V1RVWdPO96mL+qektV3VhVlyzo27eqzqmqy4fv+8yzRuavqg6qqvOq6tKq+kJVPXfoN1ZIklTVPavqgqr63DBGfnfoP6Sqzh/ed941PFCR3VhV7VFVF1XVh4a2McK/UlVXVdXnq+qzVbV56PN+w79SVXtX1VlV9cWquqyqfnytjBPBdglVtUeS1yd5QpLDkzy9qg6fb1WsAm9LcvSivpOTnNvdhyY5d2ize7s9yfO7+/AkRyY5afj3w1hhm1uTPK67H5bk4UmOrqojk7wiyau7+yFJvpHkhDnWyOrw3CSXLWgbI2zPT3f3wxd8fIv3GxZ7bZK/7u4fSfKwTP5dWRPjRLBd2qOSXNHdV3b3bUnemeS4OdfEnHX3J5N8fVH3cUlOH7ZPT/LkFS2KVae7r+/uzwzb38rkzePAGCsMeuKWobnn8NVJHpfkrKHfGNnNVdWGJE9M8qahXTFGmI73G/5FVd0/yWOSvDlJuvu27r4pa2ScCLZLOzDJtQvaW4Y+WGz/7r5+2L4hyf7zLIbVpao2JnlEkvNjrLDAcIvpZ5PcmOScJP+U5Kbuvn04xPsOr0nywiR3Du0HxBjh+3WSj1bVhVV14tDn/YaFDkmyNclbh6UNb6qq+2SNjBPBFmagJ48b98hxkiRVdd8k70nyvO6+eeE+Y4XuvqO7H55kQyZ3Cf3InEtiFamqY5Lc2N0XzrsWVr2f6O4jMlk+d1JVPWbhTu83ZPJRr0ckeUN3PyLJt7PotuMxjxPBdmnXJTloQXvD0AeLfbWqDkiS4fuNc66HVaCq9swk1J7R3e8duo0Vvs9wO9h5SX48yd5Vte2z5r3v7N4eneTYqroqk+VQj8tkjZwxwr/S3dcN329M8r5MflHm/YaFti
TZ0t3nD+2zMgm6a2KcCLZL+3SSQ4enD+6V5GlJzp5zTaxOZyc5ftg+PskH5lgLq8CwDu7NSS7r7lct2GWskCSpqvVVtfewfa8kj89kLfZ5SZ4yHGaM7Ma6+8XdvaG7N2byf5CPd/czYoywQFXdp6rut207yc8muSTeb1igu29Icm1VHTZ0HZXk0qyRcVKT2WZ2pKp+PpP1LXskeUt3v2zOJTFnVfWOJI9Nsl+SryY5Ncn7k5yZ5OAkVyd5ancvfsAUu5Gq+okkf5fk8/n/a+NOyWSdrbFCquqhmTyoY49Mftl8Znf/XlU9OJPZuX2TXJTkmd196/wqZTWoqscmeUF3H2OMsNAwHt43NNcl+cvufllVPSDeb1igqh6eyYPo9kpyZZJnZ3j/ycjHiWALAADAqLkVGQAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAWDOqmpjVV2yqO+lVfWCedUEAGMi2ALAGlRV6+ZdAwCsFMEWAFaxqnpOVV1aVRdX1TuHvvtU1Vuq6oKquqiqjhv6n1VVZ1fVx5OcO9fCAWAF+W0uAKxuJyc5pLtvraq9h77fSfLx7v7Voe+CqvrYsO+IJA/t7q/Po1gAmAcztgAwf72D/ouTnFFVz0xy+9D/s0lOrqrPJvlEknsmOXjYd45QC8DuRrAFgPn75yT7LOrbN8nXkjwxyeszmYn99LB2tpL85+5++PB1cHdfNpz37ZUqGgBWC8EWAOasu29Jcn1VPS5JqmrfJEcn+fskB3X3eUlelOT+Se6b5G+S/FZV1XD8I+ZSOACsEtbYAsDq8CtJXl9Vrxrav5vkmiTnVdX9M5mlfV1331RVv5/kNUkurqp7JPlykmPmUTQArAbVfVfLegAAAGD1cysyAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGr/D42MMcfetX2FAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "train_csv = pd.read_csv('../data/train.csv')\n", + "\n", + "plt.hist(train_csv.values[:,0])\n", + "plt.title(\"User Histogram\")\n", + "plt.xlabel(\"User\")\n", + "plt.ylabel(\"Frequency\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA7YAAAFNCAYAAAA0MPNrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHmZJREFUeJzt3X30ZXVdL/D3RwYV1CsiE+IMOKioWTeBJrK8lkqWioneZYaVkZfVaFdL066iq9JW0aWbhdqDhaKimYoPJQmV+JR5U3BA4jGvI6LMOML4wFMaCHzuH2eP/hrn4czDmfPbP16vtc46e3/3d+/zObPOWvzefL/fvau7AwAAAGN1l3kXAAAAALtDsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwBYhKrqL6rqt+ZdBwCMgWALwJ1SVV1dVT8xbP9SVX18L372m6vq97ZoW1VVXVXLkqS7n9vdvzvFtb79PQDgzkqwBQC2anPIBoDFTrAF4E6tqr43yV8k+ZGqurmqrh/a71ZVr6qqL1bVtcPU4P2GY4+pqvVV9ZKquq6qNlbVU6vqSVX1/6rqa1X18t2s69ujulV1UFW9v6quH679z1V1l6p6a5LDkvzdUPtLhv5PqarLh/4fHb7j5useXVWfrqqbqupdVfXOBZ+z+Xu9tKq+nORNVXWf4bM3VdXXh+2VC6730ar6var6l6GGv6uq+1bV26rqxqr6VFWt2p1/CwDYEcEWgDu17r4yyXOTfKK779ndBwyHTk3ykCRHJnlwkhVJfnvBqfdLcvcF7a9P8gtJfjDJo5P8VlUdvofKfHGS9UmWJzk4ycsnpfezknwxyU8Ptf+fqnpIkrcneeHQ/9xMgu9dq+quSf4myZuTHDj0e9oWn3W/4dgDkqzJ5G+FNw37hyX5ZpI/3eKcE5I8K5N/iwcl+cRwzoFJrkzyij3yrwAA2yDYAsAWqqoyCXW/3t1f6+6bkvx+JgFus28lOaW7v5XkHUkOSvKa7r6puy9PckWSR2znY35jGFG9fhglvmQ7fb+V5JAkD+jub3X3P3d3b6PvzyY5p7vPG2p7VZL9kvxokkcmWZbktcN13pvkgi3OvyPJK7r7lu7+Znd/tbvf093fGP4dTkny41uc86bu/lx335Dk75N8rrs/2N23JXlXkqO2890AYLcJtgDw3ZYn2T/JhQuC5z8M7Zt9tbtvH7a/Obxfu+D4N5Pcczuf8aruPmDzK8kPbKfvHyZZl+QDVXVVVZ28nb73T/KFzTvdfUeSazIZTb1/kg1bhOJrtjh/U3f/x+adqtq/qv6yqr5QVTcm+ViSA6pqnwXnbPm9d+bfAQB2m2ALAMmWo59fySSQfd+C8Hnv7p5LQBtGgV/c3Q9M8pQkL6qqYzcf3qL7lzKZNpzk26PPhybZkGRjkhVD22aHbvlxW+y/OMlDk/xwd/+XJD+2+dK7+n0AYE8TbAFgMsK4cliDunmU8/VJTquq70mSqlpRVT81j+Kq6slV9eAhkN6Q5PZ
Mpgwnk9ofuKD7WUmOq6pjq2rfTILpLUn+JZO1r7cneX5VLauq45Mcs4OPv1cmIf/6qjow1ssCsAgJtgCQfDjJ5Um+XFVfGdpemsn0308OU3A/mMnI5TwcMXz+zZmE0z/v7o8Mx/53kt8cpkz/Rnd/JpObWP1JJiPPP53JzaVu7e5bk/z3JCcluX7o9/5Mgu+2vDqTNbpfSfLJTKZkA8CiUtu+9wQAsNRV1flJ/qK73zTvWgBgVxmxBYA7kar68aq63zAV+cRMblplFBaAUVs27wIAgL3qoZmsw71HkquSPL27N863JADYPaYiAwAAMGqmIgMAADBqgi0AAACjNuo1tgcddFCvWrVq3mUAAAAwAxdeeOFXunv5jvqNOtiuWrUqa9eunXcZAAAAzEBVfWGafqYiAwAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqMw+2VbVPVX26qt4/7B9eVedX1bqqemdV3XVov9uwv244vmrWtQEAADB+e2PE9gVJrlyw/wdJTuvuByf5epKThvaTknx9aD9t6AcAAADbtWyWF6+qlUmOS3JKkhdVVSV5XJKfG7qcmeSVSV6X5PhhO0neneRPq6q6u2dZIwBw57Lq5HPmXcLoXX3qcfMuAeA/mfWI7auTvCTJHcP+fZNc3923Dfvrk6wYtlckuSZJhuM3DP0BAABgm2YWbKvqyUmu6+4L9/B111TV2qpau2nTpj15aQAAAEZoliO2j0rylKq6Osk7MpmC/JokB1TV5inQK5NsGLY3JDk0SYbj907y1S0v2t2nd/fq7l69fPnyGZYPAADAGMws2Hb3y7p7ZXevSnJCkg93988n+UiSpw/dTkzyvmH77GE/w/EPW18LAADAjszjObYvzeRGUusyWUN7xtB+RpL7Du0vSnLyHGoDAABgZGZ6V+TNuvujST46bF+V5Jit9PmPJD+zN+oBAABg6ZjHiC0AAADsMYItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqMwu2VXX3qrqgqv61qi6vqt8Z2t9cVZ+vqouH15FDe1XVa6tqXVVdUlVHz6o2AAAAlo5lM7z2LUke1903V9W+ST5eVX8/HPtf3f3uLfo/MckRw+uHk7xueAcAAIBtmtmIbU/cPOzuO7x6O6ccn+Qtw3mfTHJAVR0yq/oAAABYGma6xraq9qmqi5Ncl+S87j5/OHTKMN34tKq629C2Isk1C05fP7QBAADANs002Hb37d19ZJKVSY6pqu9P8rIkD0vyQ0kOTPLSnblmVa2pqrVVtXbTpk17vGYAAADGZa/cFbm7r0/ykSRP6O6Nw3TjW5K8KckxQ7cNSQ5dcNrKoW3La53e3au7e/Xy5ctnXToAAACL3Czviry8qg4YtvdL8vgk/7Z53WxVVZKnJrlsOOXsJL843B35kUlu6O6Ns6oPAACApWGWd0U+JMmZVbVPJgH6rO5+f1V9uKqWJ6kkFyd57tD/3CRPSrIuyTeSPHuGtQEAALBEzCzYdvclSY7aSvvjttG/kzxvVvXAndmqk8+Zdwmjd/Wpx827BAAAtmGvrLEFAACAWRFsAQAAGLVZrrGFPcI0WgAAYHuM2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjJtgCAAAwaoItAAAAoybYAgAAMGqCLQAAAKMm2AIAADBqgi0AAACjNrNgW1V3r6oLqupfq+ryqvqdof3wqjq/qtZV1Tu
r6q5D+92G/XXD8VWzqg0AAIClY5YjtrckeVx3PyLJkUmeUFWPTPIHSU7r7gcn+XqSk4b+JyX5+tB+2tAPAAAAtmtmwbYnbh529x1eneRxSd49tJ+Z5KnD9vHDfobjx1ZVzao+AAAAloZls7x4Ve2T5MIkD07yZ0k+l+T67r5t6LI+yYphe0WSa5Kku2+rqhuS3DfJV2ZZIwB7x6qTz5l3CUvC1aceN+8SAGDRmenNo7r79u4+MsnKJMckedjuXrOq1lTV2qpau2nTpt2uEQAAgHHbK3dF7u7rk3wkyY8kOaCqNo8Ur0yyYdjekOTQJBmO3zvJV7dyrdO7e3V3r16+fPnMawcAAGBxm+VdkZdX1QHD9n5JHp/kykwC7tOHbicmed+wffawn+H4h7u7Z1UfAAAAS8Ms19gekuTMYZ3tXZKc1d3vr6orkryjqn4vyaeTnDH0PyPJW6tqXZKvJTlhhrUBAACwRMws2Hb3JUmO2kr7VZmst92y/T+S/Mys6gEAAGBp2itrbAEAAGBWBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1JbNuwAAAMZl1cnnzLuE0bv61OPmXQIsKUZsAQAAGLWZBduqOrSqPlJVV1TV5VX1gqH9lVW1oaouHl5PWnDOy6pqXVV9pqp+ala1AQAAsHTMcirybUle3N0XVdW9klxYVecNx07r7lct7FxVD09yQpLvS3L/JB+sqod09+0zrBEAAICRm9mIbXdv7O6Lhu2bklyZZMV2Tjk+yTu6+5bu/nySdUmOmVV9AAAALA17ZY1tVa1KclSS84em51fVJVX1xqq6z9C2Isk1C05bn+0HYQAAAJhuKnJV/dfuvnRXPqCq7pnkPUle2N03VtXrkvxukh7e/yjJ/9iJ661JsiZJDjvssF0pCQBGy91oAeC7TTti++dVdUFV/c+quve0F6+qfTMJtW/r7vcmSXdf2923d/cdSV6f70w33pDk0AWnrxza/pPuPr27V3f36uXLl09bCgAAAEvUVMG2ux+d5OczCZ4XVtVfV9Xjt3dOVVWSM5Jc2d1/vKD9kAXdnpbksmH77CQnVNXdqurwJEckuWDqbwIAAMCd0tR3Re7uz1bVbyZZm+S1SY4awuvLN4/GbuFRSZ6V5NKqunhoe3mSZ1bVkZlMRb46yXOG619eVWcluSKTOyo/zx2RAQAA2JFp19j+QJJnJzkuyXlJfnp4jM/9k3wiyXcF2+7+eJLayuXO3dbndPcpSU6ZpiYAAABIph+x/ZMkb8hkdPabmxu7+0vDKC4AAADMxbTB9rgk39w8Nbiq7pLk7t39je5+68yqAwAAgB2Y9q7IH0yy34L9/Yc2AAAAmKtpg+3du/vmzTvD9v6zKQkAAACmN+1U5H+vqqO7+6IkqaofTPLNHZwDAAAwE6tOPmfeJYze1aceN+8S9phpg+0Lk7yrqr6UyZ2O75fkZ2dWFQAAAExpqmDb3Z+qqocleejQ9Jnu/tbsygIAAIDpTDtimyQ/lGTVcM7RVZXufstMqgIAAIApTRVsq+qtSR6U5OIktw/NnUSwBQAAYK6mHbFdneTh3d2zLGYpsqgdAABgtqZ93M9lmdwwCgAAABaVaUdsD0pyRVVdkOSWzY3d/ZSZVAUAAABTmjbYvnKWRQAAAMCumvZxP/9UVQ9IckR3f7Cq9k+yz2xLAwAAgB2bao1tVf1ykncn+cuhaUWSv51VUQAAADCtaW8e9bwkj0pyY5J092eTfM+sigIAAIBpTRtsb+nuWzfvVNWyTJ5jCwAAAHM1bbD9p6p6eZL9qurxSd6V5O9mVxYAAABMZ9pge3KSTUkuTfKcJOcm+c1ZFQUAAADTmvauyHckef3
wAgAAgEVjqmBbVZ/PVtbUdvcD93hFAAAAsBOmCrZJVi/YvnuSn0ly4J4vBwAAAHbOVGtsu/urC14buvvVSY6bcW0AAACwQ9NORT56we5dMhnBnXa0FwAAAGZm2nD6Rwu2b0tydZJnbO+Eqjo0yVuSHJzJ+tzTu/s1VXVgkncmWbX5Ot399aqqJK9J8qQk30jyS9190dTfBAAAgDulae+K/NhduPZtSV7c3RdV1b2SXFhV5yX5pSQf6u5Tq+rkTB4l9NIkT0xyxPD64SSvG94BAABgm6adivyi7R3v7j/eStvGJBuH7Zuq6sokK5Icn+QxQ7czk3w0k2B7fJK3dHcn+WRVHVBVhwzXAQAAgK2a6uZRmayp/ZVMgumKJM9NcnSSew2v7aqqVUmOSnJ+koMXhNUvZzJVOcN1r1lw2vqhbctrramqtVW1dtOmTVOWDwAAwFI17RrblUmO7u6bkqSqXpnknO7+hR2dWFX3TPKeJC/s7hsnS2knurur6ruej7s93X16ktOTZPXq1Tt1LgAAAEvPtCO2Bye5dcH+rfnOSOs2VdW+mYTat3X3e4fma6vqkOH4IUmuG9o3JDl0wekrhzYAAADYpmmD7VuSXFBVrxxGa8/PZH3sNg13OT4jyZVbrME9O8mJw/aJSd63oP0Xa+KRSW6wvhYAAIAdmfauyKdU1d8nefTQ9Ozu/vQOTntUkmclubSqLh7aXp7k1CRnVdVJSb6Q7zw26NxMHvWzLpPH/Tx76m8BAADAnda0a2yTZP8kN3b3m6pqeVUd3t2f31bn7v54ktrG4WO30r+TPG8n6gEAAIDppiJX1SsyeSTPy4amfZP81ayKAgAAgGlNu8b2aUmekuTfk6S7v5QpHvMDAAAAszZtsL11mCrcSVJV95hdSQAAADC9aYPtWVX1l0kOqKpfTvLBJK+fXVkAAAAwnWnvivyqqnp8khuTPDTJb3f3eTOtDAAAAKaww2BbVfsk+WB3PzaJMAsAALtp1cnnzLsEWFJ2GGy7+/aquqOq7t3dN+yNogAWG3+AAAAsXtM+x/bmJJdW1XkZ7oycJN39azOpCgAAAKY0bbB97/ACAACARWW7wbaqDuvuL3b3mXurIAAAANgZO3rcz99u3qiq98y4FgAAANhpOwq2tWD7gbMsBAAAAHbFjoJtb2MbAAAAFoUd3TzqEVV1YyYjt/sN2xn2u7v/y0yrAwAAgB3YbrDt7n32ViEAAACwK3Y0FRkAAAAWNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAUZtZsK2qN1bVdVV12YK2V1bVhqq6eHg9acGxl1XVuqr6TFX91KzqAgAAYGmZ5Yjtm5M8YSvtp3X3kcPr3CSpqocnOSHJ9w3n/HlV7TPD2gAAAFgiZhZsu/tjSb42Zffjk7yju2/p7s8nWZfkmFnVBgAAwNIxjzW2z6+qS4apyvcZ2lYkuWZBn/VD23epqjVVtbaq1m7atGnWtQIAALDI7e1g+7okD0pyZJKNSf5oZy/Q3ad39+ruXr18+fI9XR8AAAAjs1eDbXdf2923d/cdSV6f70w33pDk0AVdVw5tAAAAsF17NdhW1SELdp+WZPMdk89OckJV3a2qDk9yRJIL9mZtAAAAjNOyWV24qt6e5DFJDqqq9UlekeQxVXVkkk5ydZLnJEl3X15VZyW5IsltSZ7X3bfPqjYAAACWjpkF2+5+5laaz9hO/1OSnDKregAAAFia5nFXZAAAANhjBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNRmFmyr6o1VdV1VXbag7cCqOq+qPju832dor6p6bVW
tq6pLquroWdUFAADA0jLLEds3J3nCFm0nJ/lQdx+R5EPDfpI8MckRw2tNktfNsC4AAACWkJkF2+7+WJKvbdF8fJIzh+0zkzx1QftbeuKTSQ6oqkNmVRsAAABLx95eY3twd28ctr+c5OBhe0WSaxb0Wz+0AQAAwHbN7eZR3d1JemfPq6o1VbW2qtZu2rRpBpUBAAAwJns72F67eYrx8H7d0L4hyaEL+q0c2r5Ld5/e3au7e/Xy5ctnWiwAAACL394OtmcnOXHYPjHJ+xa0/+Jwd+RHJrlhwZRlAAAA2KZls7pwVb09yWOSHFRV65O8IsmpSc6qqpOSfCHJM4bu5yZ5UpJ1Sb6R5NmzqgsAAIClZWbBtrufuY1Dx26lbyd53qxqAQAAYOma282jAAAAYE8QbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAURNsAQAAGDXBFgAAgFETbAEAABg1wRYAAIBRE2wBAAAYNcEWAACAUVs2jw+tqquT3JTk9iS3dffqqjowyTuTrEpydZJndPfX51EfAAAA4zHPEdvHdveR3b162D85yYe6+4gkHxr2AQAAYLsW01Tk45OcOWyfmeSpc6wFAACAkZhXsO0kH6iqC6tqzdB2cHdvHLa/nOTg+ZQGAADAmMxljW2S/9bdG6rqe5KcV1X/tvBgd3dV9dZOHILwmiQ57LDDZl8pAAAAi9pcRmy7e8Pwfl2Sv0lyTJJrq+qQJBner9vGuad39+ruXr18+fK9VTIAAACL1F4PtlV1j6q61+btJD+Z5LIkZyc5ceh2YpL37e3aAAAAGJ95TEU+OMnfVNXmz//r7v6HqvpUkrOq6qQkX0jyjDnUBgAAwMjs9WDb3VclecRW2r+a5Ni9XQ8AAADjtpge9wMAAAA7TbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZt0QXbqnpCVX2mqtZV1cnzrgcAAIDFbVEF26raJ8mfJXlikocneWZVPXy+VQEAALCYLapgm+SYJOu6+6ruvjXJO5IcP+eaAAAAWMQWW7BdkeSaBfvrhzYAAADYqmXzLmBnVdWaJGuG3Zur6jPzrIdF46AkX5l3EbCH+D2z1PhNs5T4PbNk1B+M4vf8gGk6LbZguyHJoQv2Vw5t39bdpyc5fW8WxeJXVWu7e/W864A9we+ZpcZvmqXE75mlZCn9nhfbVORPJTmiqg6vqrsmOSHJ2XOuCQAAgEVsUY3YdvdtVfX8JP+YZJ8kb+zuy+dcFgAAAIvYogq2SdLd5yY5d951MDqmp7OU+D2z1PhNs5T4PbOULJnfc3X3vGsAAACAXbbY1tgCAADAThFsGbWqOrSqPlJVV1TV5VX1gnnXBLurqvapqk9X1fvnXQvsjqo6oKreXVX/VlVXVtWPzLsm2B1V9evD3xuXVdXbq+ru864JplVVb6yq66rqsgVtB1bVeVX12eH9PvOscXcItozdbUle3N0PT/LIJM+rqofPuSbYXS9IcuW8i4A94DVJ/qG7H5bkEfG7ZsSqakWSX0uyuru/P5MbnZ4w36pgp7w5yRO2aDs5yYe6+4gkHxr2R0mwZdS6e2N3XzRs35TJH00r5lsV7LqqWpnkuCRvmHctsDuq6t5JfizJGUnS3bd29/XzrQp227Ik+1XVsiT7J/nSnOuBqXX3x5J8bYvm45OcOWyfmeSpe7WoPUiwZcmoqlVJjkpy/nwrgd3y6iQvSXLHvAuB3XR4kk1J3jRMrX9DVd1
j3kXBruruDUleleSLSTYmuaG7PzDfqmC3HdzdG4ftLyc5eJ7F7A7BliWhqu6Z5D1JXtjdN867HtgVVfXkJNd194XzrgX2gGVJjk7yuu4+Ksm/Z8RT3GBYe3h8Jv/T5v5J7lFVvzDfqmDP6cnjckb7yBzBltGrqn0zCbVv6+73zrse2A2PSvKUqro6yTuSPK6q/mq+JcEuW59kfXdvnkXz7kyCLozVTyT5fHdv6u5vJXlvkh+dc02wu66tqkOSZHi/bs717DLBllGrqspk/daV3f3H864Hdkd3v6y7V3b3qkxuSPLh7jYawCh195eTXFNVDx2ajk1yxRxLgt31xSSPrKr9h78/jo0bojF+Zyc5cdg+Mcn75ljLbhFsGbtHJXlWJiNbFw+vJ827KACSJL+a5G1VdUmSI5P8/pzrgV02zD54d5KLklyayd/Rp8+1KNgJVfX2JJ9I8tCqWl9VJyU5Ncnjq+qzmcxKOHWeNe6OmkylBgAAgHEyYgsAAMCoCbYAAACMmmALAADAqAm2AAAAjJpgCwAAwKgJtgAwZ1V18/C+qqp+bt71AMDYCLYAsHisSiLYAsBOEmwBYPE4Ncmjq+riqvr1qtqnqv6wqj5VVZdU1XOSpKoeU1X/VFXvq6qrqurUqvr5qrqgqi6tqgfN+XsAwF61bN4FAADfdnKS3+juJydJVa1JckN3/1BV3S3J/62qDwx9H5Hke5N8LclVSd7Q3cdU1QuS/GqSF+798gFgPgRbAFi8fjLJD1TV04f9eyc5IsmtST7V3RuTpKo+l2Rz4L00yWP3dqEAME+CLQAsXpXkV7v7H/9TY9VjktyyoOmOBft3xH/fAbiTscYWABaPm5Lca8H+Pyb5laraN0mq6iFVdY+5VAYAi5j/owsAi8clSW6vqn9N8uYkr8nkTskXVVUl2ZTkqXOrDgAWqeruedcAAAAAu8xUZAAAAEZNsAUAAGDUBFsAAABGTbAFAABg1ARbAAAARk2wBQAAYNQEWwAAAEZNsAUAAGDU/j/dckSdh0HbiQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "train_csv = pd.read_csv('../data/train.csv')\n", + "\n", + "plt.hist(np.concatenate([train_csv.values[:,1], train_csv.values[:,2]]))\n", + "plt.title(\"Item Histogram\")\n", + "plt.xlabel(\"Item\")\n", + "plt.ylabel(\"Frequency\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..0b84a24 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,76 @@ +absl-py==0.2.2 +appnope==0.1.0 +astor==0.6.2 +backcall==0.1.0 +bleach==1.5.0 +certifi==2018.4.16 +chardet==3.0.4 +cycler==0.10.0 +decorator==4.3.0 +entrypoints==0.2.3 +gast==0.2.0 +grpcio==1.12.0 +h5py==2.7.1 +html5lib==0.9999999 +idna==2.6 +ipykernel==4.8.2 +ipython==6.4.0 +ipython-genutils==0.2.0 +ipywidgets==7.2.1 +jedi==0.12.0 +Jinja2==2.10 +jsonschema==2.6.0 +jupyter==1.0.0 +jupyter-client==5.2.3 +jupyter-console==5.2.0 +jupyter-core==4.4.0 +Keras==2.1.6 +kiwisolver==1.0.1 +Markdown==2.6.11 +MarkupSafe==1.0 +matplotlib==2.2.2 +mistune==0.8.3 +nbconvert==5.3.1 +nbformat==4.4.0 +notebook==5.5.0 +numpy==1.14.3 +pandas==0.23.0 +pandocfilters==1.4.2 +parso==0.2.1 +pexpect==4.5.0 +pickleshare==0.7.4 +pipenv==2018.5.18 +prompt-toolkit==1.0.15 +protobuf==3.5.2.post1 +ptyprocess==0.5.2 +pydot==1.2.4 +Pygments==2.2.0 +pyparsing==2.2.0 +python-dateutil==2.7.3 +pytz==2018.4 +PyYAML==3.12 +pyzmq==17.0.0 +qtconsole==4.3.1 +requests==2.18.4 +scikit-learn==0.19.1 
+scipy==1.1.0 +selenium==3.12.0 +Send2Trash==1.5.0 +simplegeneric==0.8.1 +six==1.11.0 +sklearn==0.0 +tensorboard==1.8.0 +tensorflow==1.8.0 +termcolor==1.1.0 +terminado==0.8.1 +testpath==0.3.1 +tornado==5.0.2 +tqdm==4.23.4 +traitlets==4.3.2 +urllib3==1.22 +virtualenv==16.0.0 +virtualenv-clone==0.3.0 +wcwidth==0.1.7 +webencodings==0.5.1 +Werkzeug==0.14.1 +widgetsnbextension==3.2.1 diff --git a/src/adaboost_cdl.py b/src/adaboost_cdl.py new file mode 100644 index 0000000..a857069 --- /dev/null +++ b/src/adaboost_cdl.py @@ -0,0 +1,154 @@ +import logging +import pickle +import json +import numpy as np +import cdl +import json +import argparse +import math +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from sklearn.metrics import accuracy_score +from keras.utils import plot_model +from keras.layers import Activation, LeakyReLU +from keras import optimizers +from keras import callbacks +from utils import custom_load, save_result + +def adaboost_predict(models, X, adaboost_error): + assert(len(models) == len(adaboost_error)) + pred_Y = np.zeros(X.shape[0]) + for model, error in zip(models, adaboost_error): + pred_model = model.predict(X) + pred_model[pred_model == 0] = -1. 
+ pred_model = pred_model * math.log((1 - error) / error) + pred_Y = np.add(pred_Y, pred_model) + return pred_Y > 0 + +def main(config): + logging.info('Reading data') + X, Y, user_feature, item_feature, pred_X = custom_load(**config) + + num_user_feature = user_feature.shape[1] # 13 + num_item_feature = item_feature.shape[1] # 11 + + testing_acc = [] + training_acc = [] + adaboost_history = [] + n_estimater = 3 + k_fold = 3 + + for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)): + X_train, X_test = X[train_index], X[test_index] + Y_train, Y_test = Y[train_index], Y[test_index] + + adaboost_models = [] + adaboost_error = [] + adaboost_original_train_acc = [] + adaboost_original_test_acc = [] + p = np.full((X_train.shape[0],), 1. / X_train.shape[0]) + for i in range(n_estimater): + smaple_index = np.random.choice(X_train.shape[0], size=X_train.shape[0], p=p) + model = cdl.CDL( + user_feature, item_feature, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_w=5, # regula + lamda_u=5, # User offset regu + lamda_v=5, # Item offset regu + encoder_noise=0.2, + dropout_rate=0.4, + activation=LeakyReLU(0.3)) + + model.fit( + X_train[smaple_index], Y_train[smaple_index], + X_test, Y_test, + lamda_n=5, # Item decode loss + lamda_m=5, # User decode loss + lamda_c=10, # Predict loss + batch_size=128, + epochs=300, + optimizer=optimizers.Adam(lr=0.001), + ) + + error_index = model.predict(X_train) != Y_train + # print(error_index) + error_value = p[error_index].sum() + adaboost_original_train_acc.append(model.get_accuracy(X_train, Y_train)) + adaboost_original_test_acc.append(model.get_accuracy(X_test, Y_test)) + adaboost_error.append(error_value) + adaboost_models.append(model) + p[np.logical_not(error_index)] *= error_value / (1 - error_value) + p /= p.sum() + + training_acc.append(accuracy_score(Y_train, adaboost_predict(adaboost_models, X_train, 
adaboost_error))) + testing_acc.append(accuracy_score(Y_test, adaboost_predict(adaboost_models, X_test, adaboost_error))) + adaboost_history.append({ + 'adaboost_original_train_acc': adaboost_original_train_acc, + 'adaboost_original_test_acc': adaboost_original_test_acc, + 'adaboost_error': adaboost_error, + }) + logging.info("Accuracy: %f" % testing_acc[-1]) + + for i in range(k_fold): + logging.info("%d-fold Training Accuracy: [%s]" % (i, adaboost_history[i]['adaboost_original_train_acc'])) + logging.info("%d-fold Testing Accuracy: [%s]" % (i, adaboost_history[i]['adaboost_original_test_acc'])) + logging.info("%d-fold Adaboost Error: [%s]" % (i, adaboost_history[i]['adaboost_error'])) + logging.info("%d-fold Training bagging result: %f" % (i, training_acc[i])) + logging.info("%d-fold Testing bagging result: %f" % (i, testing_acc[i])) + +def save_model(config): + + X, Y, users, items, pred_X = custom_load(**config) + + X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + model = cdl.CDL( + users, items, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_w=5, # regula + lamda_u=5, # User offset regu + lamda_v=5, # Item offset regu + encoder_noise=0.2, + dropout_rate=0.4, + activation=LeakyReLU(0.3)) + + logging.info("Autoencoder init Entropy: %s" % str(model.get_autoencoder_loss())) + + model.fit( + X_train, Y_train, + X_test, Y_test, + lamda_n=5, # Item decode loss + lamda_m=5, # User decode loss + lamda_c=10, # Predict loss + batch_size=32, + epochs=300, + optimizer=optimizers.Adam(lr=0.001), + # callbacks=[callbacks.EarlyStopping( + # monitor='val_loss', + # min_delta=0, + # patience=5, + # verbose=1, + # mode='auto')] + ) + + logging.info("Autoencoder final Entropy: %s" % str(model.get_autoencoder_loss())) + logging.info("Accuracy: %f" % 
model.get_accuracy(X_test, Y_test)) + model.get_model().save('./models/CDL-%f.h5' % model.get_accuracy(X_test, Y_test)) + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + parser = argparse.ArgumentParser(description='PTT Crawer') + parser.add_argument('-c', '--config', type=str, default="./config.json") + parser.add_argument('-s', '--save_model', action='store_true') + args = parser.parse_args() + with open(args.config, 'r') as config_file: + config = json.loads(config_file.read()) + if args.save_model: + save_model(config) + else: + main(config) \ No newline at end of file diff --git a/src/bagging.py b/src/bagging.py new file mode 100644 index 0000000..3947f19 --- /dev/null +++ b/src/bagging.py @@ -0,0 +1,251 @@ +import logging +import pickle +import json +import cdl +import cdl_ffn +import cdl_fn +import scipy +import argparse +import math +import pandas as pd +import numpy as np +from sklearn.decomposition import PCA +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from keras.utils import plot_model +from keras.layers import Activation, LeakyReLU +from keras import optimizers +from keras import callbacks +from utils import custom_load, save_result +from sklearn.preprocessing import StandardScaler +from sklearn.preprocessing import OneHotEncoder +from sklearn.model_selection import cross_val_score +from sklearn.model_selection import GridSearchCV +from sklearn.model_selection import RandomizedSearchCV +from sklearn.metrics import accuracy_score +from sklearn.metrics import classification_report +from sklearn.ensemble import AdaBoostClassifier +from sklearn.ensemble import RandomForestClassifier +from sklearn.tree import DecisionTreeClassifier +from sklearn.neighbors import KNeighborsClassifier +from sklearn.svm import SVC +from sklearn.linear_model import LogisticRegression +from sklearn.externals import joblib + +def bagging_predict(models, X_custom): + 
pred_Y = np.zeros(X_custom[0].shape[0]) + for model, X in zip(models, X_custom): + pred_Y = np.add(pred_Y, model.predict(X)) + return pred_Y > len(models) // 2 + +def expand(X, users, items): + ret = np.zeros((X.shape[0], users.shape[1] + 2 * items.shape[1])) + for i in range(X.shape[0]): + ret[i] = np.concatenate([users[X[i][0]], items[X[i][1]], items[X[i][2]]]) + return ret + +def get_accuracy(model, X, Y): + return accuracy_score(Y, model.predict(X)) + +def adaboost_predict(models, X_custom, adaboost_error): + assert(len(models) == len(adaboost_error)) + pred_Y = np.zeros(X_custom[0].shape[0]) + for model, X, error in zip(models, X_custom, adaboost_error): + pred_y = model.predict(X) + pred_y[pred_y == 0] = -1. + pred_y = pred_y * math.log((1 - error) / error) + pred_Y = np.add(pred_Y, pred_y) + return pred_Y > 0 + +def main(config): + logging.info('Reading data') + X, Y, users, items, pred_X = custom_load(**config) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + estimaters = [RandomForestClassifier, SVC, LogisticRegression, KNeighborsClassifier, cdl.CDL, cdl_ffn.CDL, cdl_fn.CDL] + estimaters_cfg = { + cdl.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, num_item_feature], + 'lamda_w': 5, # regula + 'lamda_u': 5, # User offset regu + 'lamda_v': 5, # Item offset regu + 'encoder_noise': 0.2, + 'dropout_rate': 0.4, + 'activation': LeakyReLU(0.3) + }, + 'train': { + 'lamda_n': 5, # Item decode loss + 'lamda_m': 5, # User decode loss + 'lamda_c': 10, # Predict loss + 'batch_size': 128, + 'epochs': 250, + 'optimizer': optimizers.Adam(lr=0.001), + }, + 'need_PCA': False, + }, + cdl_ffn.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, num_item_feature], + 'predict_layer_dim': [10, 1], + 'lamda_w': 
5, # regula + 'lamda_u': 5, # User offset regu + 'lamda_v': 5, # Item offset regu + 'encoder_noise': 0.1, + 'dropout_rate': 0.2, + 'activation': Activation('relu') + }, + 'train': { + 'lamda_n': 5, # Item decode loss + 'lamda_m': 5, # User decode loss + 'lamda_c': 10, # Predict loss + 'batch_size': 32, + 'epochs': 250, + 'optimizer': optimizers.Adam(lr=0.001), + }, + 'need_PCA': False, + }, + cdl_fn.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, num_item_feature], + 'predict_layer_dim': [14, 2], + 'lamda_w': 10, # regula + 'lamda_u': 1, # User offset regu + 'lamda_v': 1, # Item offset regu + 'encoder_noise': 0.2, + 'dropout_rate': 0.2, + 'activation': Activation('relu') + }, + 'train': { + 'lamda_n': 10, # Item decode loss + 'lamda_m': 10, # User decode loss + 'lamda_c': 100, # Predict loss + 'batch_size': 64, + 'epochs': 250, + 'optimizer': optimizers.Adam(lr=0.0005), + }, + 'need_PCA': False, + }, + RandomForestClassifier: { + 'build': { + 'n_estimators': 50, + 'max_depth': 2, + 'criterion': 'gini' + }, + 'train': {}, + 'need_PCA': True, + }, + SVC: { + 'build': { + 'C': 0.09, + }, + 'train': {}, + 'need_PCA': True, + }, + LogisticRegression: { + 'build': { + 'C': 0.1, + }, + 'train': {}, + 'need_PCA': True, + }, + KNeighborsClassifier: { + 'build': { + 'n_neighbors': 50, + }, + 'train': {}, + 'need_PCA': True, + } + } + + k_fold = 3 + testing_acc = [] + training_acc = [] + autoencoder_entropy = [] + autoencoder_pretrain_entropy = [] + bagging_history = [] + + for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)): + X_train, X_test = X[train_index], X[test_index] + Y_train, Y_test = Y[train_index], Y[test_index] + + X_train_expand = expand(X_train, users, items) + X_test_expand = expand(X_test, users, items) + pca = PCA(n_components=5) + pca.fit(X_train_expand) + X_train_PCA = pca.transform(X_train_expand) 
+ X_test_PCA = pca.transform(X_test_expand) + + bagging_models = [] + bagging_train_acc = [] + bagging_test_acc = [] + bagging_train_error = [] + + X_train_custom = [] + X_test_custom = [] + + p = np.full((X_train.shape[0],), 1. / X_train.shape[0]) + for estimater in estimaters: + # smaple_index = np.random.choice(X_train.shape[0], size=X_train.shape[0], p=p) + # smaple_index = np.random.choice(X_train.shape[0], X_train.shape[0],) + smaple_index = np.arange(X_train.shape[0]) + model = estimater(**estimaters_cfg[estimater]['build']) + if estimaters_cfg[estimater]['need_PCA']: + model.fit( + X_train_PCA, + Y_train[smaple_index], + **estimaters_cfg[estimater]['train']) + X_train_custom.append(X_train_PCA) + X_test_custom.append(X_test_PCA) + else: + model.fit( + X_train[smaple_index], + Y_train[smaple_index], + **estimaters_cfg[estimater]['train']) + X_train_custom.append(X_train) + X_test_custom.append(X_test) + + bagging_train_acc.append(get_accuracy(model, X_train_custom[-1], Y_train)) + bagging_test_acc.append(get_accuracy(model, X_test_custom[-1], Y_test)) + bagging_train_error.append(1 - bagging_train_acc[-1]) + bagging_models.append(model) + # p[np.logical_not(error_index)] *= error_value / (1 - error_value) + # p /= p.sum() + + training_acc.append(accuracy_score(Y_train, adaboost_predict(bagging_models, X_train_custom, bagging_train_error))) + testing_acc.append(accuracy_score(Y_test, adaboost_predict(bagging_models, X_test_custom, bagging_train_error))) + bagging_history.append({ + 'bagging_train_acc': bagging_train_acc, + 'bagging_test_acc': bagging_test_acc, + }) + logging.info("Accuracy: %f" % testing_acc[-1]) + + for i in range(k_fold): + logging.info("%d-fold Training Accuracy: %s" % (i, ["%.5f" % v for v in bagging_history[i]['bagging_train_acc']])) + logging.info("%d-fold Testing Accuracy: %s" % (i, ["%.5f" % v for v in bagging_history[i]['bagging_test_acc']])) + logging.info("%d-fold Training bagging result: %f" % (i, training_acc[i])) + 
logging.info("%d-fold Testing bagging result: %f" % (i, testing_acc[i])) + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + parser = argparse.ArgumentParser(description='PTT Crawer') + parser.add_argument('-c', '--config', type=str, default="./config.json") + parser.add_argument('-s', '--save_model', action='store_true') + args = parser.parse_args() + with open(args.config, 'r') as config_file: + config = json.loads(config_file.read()) + if args.save_model: + save_model(config) + else: + main(config) \ No newline at end of file diff --git a/src/bagging_cdl.py b/src/bagging_cdl.py new file mode 100644 index 0000000..f79c707 --- /dev/null +++ b/src/bagging_cdl.py @@ -0,0 +1,209 @@ +import logging +import pickle +import json +import numpy as np +import cdl +import cdl_fn +import cdl_ffn +import json +import argparse +import math +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import accuracy_score +from keras.utils import plot_model +from keras.layers import Activation, LeakyReLU +from keras import optimizers +from keras import callbacks +from utils import custom_load, save_result + +def bagging_predict(models, X, accuracy): + pred_Y = np.zeros(X.shape[0]) + for model, acc in zip(models, accuracy): + pred_y = model.predict(X) + pred_y[pred_y == 0] = -1 + pred_y = pred_y * math.log(acc / (1 - acc)) + pred_Y = np.add(pred_Y, pred_y) + return pred_Y > 0 + +def main(config): + logging.info('Reading data') + X, Y, users, items, pred_X = custom_load(**config) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + estimaters = [cdl.CDL, cdl_ffn.CDL, cdl_fn.CDL, ] + estimaters_cfg = { + cdl.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, 
num_item_feature], + 'lamda_w': 5, # regula + 'lamda_u': 5, # User offset regu + 'lamda_v': 5, # Item offset regu + 'encoder_noise': 0.2, + 'dropout_rate': 0.4, + 'activation': LeakyReLU(0.3) + }, + 'train': { + 'lamda_n': 5, # Item decode loss + 'lamda_m': 5, # User decode loss + 'lamda_c': 10, # Predict loss + 'batch_size': 128, + 'epochs': 300, + 'optimizer': optimizers.Adam(lr=0.001), + } + }, + cdl_ffn.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, num_item_feature], + 'predict_layer_dim': [10, 1], + 'lamda_w': 5, # regula + 'lamda_u': 5, # User offset regu + 'lamda_v': 5, # Item offset regu + 'encoder_noise': 0.1, + 'dropout_rate': 0.2, + 'activation': Activation('relu') + }, + 'train': { + 'lamda_n': 5, # Item decode loss + 'lamda_m': 5, # User decode loss + 'lamda_c': 10, # Predict loss + 'batch_size': 32, + 'epochs': 300, + 'optimizer': optimizers.Adam(lr=0.001), + } + }, + cdl_fn.CDL: { + 'build': { + 'users': users, + 'items': items, + 'user_layer_dim': [num_user_feature, 10, 7, 10, num_user_feature], + 'item_layer_dim': [num_item_feature, 7, num_item_feature], + 'predict_layer_dim': [14, 2], + 'lamda_w': 10, # regula + 'lamda_u': 1, # User offset regu + 'lamda_v': 1, # Item offset regu + 'encoder_noise': 0.2, + 'dropout_rate': 0.2, + 'activation': Activation('relu') + }, + 'train': { + 'lamda_n': 10, # Item decode loss + 'lamda_m': 10, # User decode loss + 'lamda_c': 100, # Predict loss + 'batch_size': 64, + 'epochs': 300, + 'optimizer': optimizers.Adam(lr=0.0005), + } + } + } + + k_fold = 3 + testing_acc = [] + training_acc = [] + autoencoder_entropy = [] + autoencoder_pretrain_entropy = [] + bagging_history = [] + + for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)): + X_train, X_test = X[train_index], X[test_index] + Y_train, Y_test = Y[train_index], Y[test_index] + + bagging_models = [] + 
bagging_weight = [] + bagging_original_acc = [] + + for estimater in estimaters: + smaple_index = np.random.choice(X_train.shape[0], X_train.shape[0],) + smaple_index = np.arange(X_train.shape[0]) + model = estimater(**estimaters_cfg[estimater]['build']) + + model.fit( + X_train[smaple_index], + Y_train[smaple_index], + X_test, + Y_test, + **estimaters_cfg[estimater]['train'] + ) + + bagging_original_acc.append(model.get_accuracy(X_train, Y_train)) + bagging_weight.append(model.get_accuracy(X_test, Y_test)) + bagging_models.append(model) + + training_acc.append(accuracy_score(Y_train, bagging_predict(bagging_models, X_train, bagging_original_acc))) + testing_acc.append(accuracy_score(Y_test, bagging_predict(bagging_models, X_test, bagging_original_acc))) + bagging_history.append({ + 'bagging_original_acc': bagging_original_acc, + 'bagging_weight': bagging_weight, + }) + logging.info("Accuracy: %f" % testing_acc[-1]) + + for i in range(k_fold): + logging.info("%d-fold Training Accuracy: [%s]" % (i, bagging_history[i]['bagging_original_acc'])) + logging.info("%d-fold Testing Accuracy: [%s]" % (i, bagging_history[i]['bagging_weight'])) + logging.info("%d-fold Training bagging result: %f" % (i, training_acc[i])) + logging.info("%d-fold Testing bagging result: %f" % (i, testing_acc[i])) + +def save_model(config): + + X, Y, users, items, pred_X = custom_load(**config) + + X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + model = cdl.CDL( + users, items, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_w=5, # regula + lamda_u=5, # User offset regu + lamda_v=5, # Item offset regu + encoder_noise=0.2, + dropout_rate=0.4, + activation=LeakyReLU(0.3)) + + logging.info("Autoencoder init Entropy: %s" % str(model.get_autoencoder_loss())) + + model.fit( + X_train, Y_train, 
+ X_test, Y_test, + lamda_n=5, # Item decode loss + lamda_m=5, # User decode loss + lamda_c=10, # Predict loss + batch_size=32, + epochs=1, + optimizer=optimizers.Adam(lr=0.001), + # callbacks=[callbacks.EarlyStopping( + # monitor='val_loss', + # min_delta=0, + # patience=5, + # verbose=1, + # mode='auto')] + ) + + logging.info("Autoencoder final Entropy: %s" % str(model.get_autoencoder_loss())) + logging.info("Accuracy: %f" % model.get_accuracy(X_test, Y_test)) + model.get_model().save('./models/CDL-%f.h5' % model.get_accuracy(X_test, Y_test)) + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + parser = argparse.ArgumentParser(description='PTT Crawer') + parser.add_argument('-c', '--config', type=str, default="./config.json") + parser.add_argument('-s', '--save_model', action='store_true') + args = parser.parse_args() + with open(args.config, 'r') as config_file: + config = json.loads(config_file.read()) + if args.save_model: + save_model(config) + else: + main(config) \ No newline at end of file diff --git a/src/bagging_pure.py b/src/bagging_pure.py new file mode 100644 index 0000000..34140b5 --- /dev/null +++ b/src/bagging_pure.py @@ -0,0 +1,147 @@ +import logging +import pickle +import json +import numpy as np +import cdl +import json +import argparse +import math +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from sklearn.metrics import accuracy_score +from keras.utils import plot_model +from keras.layers import Activation, LeakyReLU +from keras import optimizers +from keras import callbacks +from utils import custom_load, save_result + +def bagging_predict(models, X): + pred_Y = np.zeros(X.shape[0]) + for model in models: + pred_y = model.predict(X) + pred_Y = np.add(pred_Y, pred_y) + return pred_Y > len(models) // 2 + +def main(config): + logging.info('Reading data') + X, Y, user_feature, item_feature, pred_X = custom_load(**config) + + 
num_user_feature = user_feature.shape[1] # 13 + num_item_feature = item_feature.shape[1] # 11 + + testing_acc = [] + training_acc = [] + bagging_history = [] + n_estimater = 5 + k_fold = 5 + + for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)): + X_train, X_test = X[train_index], X[test_index] + Y_train, Y_test = Y[train_index], Y[test_index] + + bagging_models = [] + bagging_original_train_acc = [] + bagging_original_test_acc = [] + for i in range(n_estimater): + smaple_index = np.random.choice(X_train.shape[0], size=X_train.shape[0]) + model = cdl.CDL( + user_feature, item_feature, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_u=32, + lamda_w=0.1344613232775228, + lamda_v=0.27891447944038594, + encoder_noise=0.2, + dropout_rate=0.4, + activation='selu') + + model.fit( + X_train[smaple_index], Y_train[smaple_index], + X_test, Y_test, + lamda_n=1.2001027195781027, + lamda_m=2.489385178928047, + lamda_c=47.80028677294353, + batch_size=150, + epochs=300, + optimizer=optimizers.Adam(lr=0.001), + callbacks=[callbacks.EarlyStopping( + monitor='val_loss', + min_delta=0, + patience=10, + verbose=0, + mode='auto')] + ) + + bagging_original_train_acc.append(model.get_accuracy(X_train, Y_train)) + bagging_original_test_acc.append(model.get_accuracy(X_test, Y_test)) + bagging_models.append(model) + + training_acc.append(accuracy_score(Y_train, bagging_predict(bagging_models, X_train))) + testing_acc.append(accuracy_score(Y_test, bagging_predict(bagging_models, X_test))) + bagging_history.append({ + 'bagging_original_train_acc': bagging_original_train_acc, + 'bagging_original_test_acc': bagging_original_test_acc, + }) + logging.info("Accuracy: %f" % testing_acc[-1]) + + for i in range(k_fold): + logging.info("%d-fold Training Accuracy: [%s]" % (i, bagging_history[i]['bagging_original_train_acc'])) + logging.info("%d-fold Testing Accuracy: [%s]" % (i, 
bagging_history[i]['bagging_original_test_acc'])) + logging.info("%d-fold Training bagging result: %f" % (i, training_acc[i])) + logging.info("%d-fold Testing bagging result: %f" % (i, testing_acc[i])) + +def save_model(config): + + X, Y, users, items, pred_X = custom_load(**config) + + X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + model = cdl.CDL( + users, items, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_w=5, # regula + lamda_u=5, # User offset regu + lamda_v=5, # Item offset regu + encoder_noise=0.2, + dropout_rate=0.4, + activation=LeakyReLU(0.3)) + + logging.info("Autoencoder init Entropy: %s" % str(model.get_autoencoder_loss())) + + model.fit( + X_train, Y_train, + X_test, Y_test, + lamda_n=5, # Item decode loss + lamda_m=5, # User decode loss + lamda_c=10, # Predict loss + batch_size=32, + epochs=300, + optimizer=optimizers.Adam(lr=0.001), + # callbacks=[callbacks.EarlyStopping( + # monitor='val_loss', + # min_delta=0, + # patience=5, + # verbose=1, + # mode='auto')] + ) + + logging.info("Autoencoder final Entropy: %s" % str(model.get_autoencoder_loss())) + logging.info("Accuracy: %f" % model.get_accuracy(X_test, Y_test)) + model.get_model().save('./models/CDL-%f.h5' % model.get_accuracy(X_test, Y_test)) + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + parser = argparse.ArgumentParser(description='PTT Crawer') + parser.add_argument('-c', '--config', type=str, default="./config.json") + parser.add_argument('-s', '--save_model', action='store_true') + args = parser.parse_args() + with open(args.config, 'r') as config_file: + config = json.loads(config_file.read()) + if args.save_model: + save_model(config) + else: + main(config) \ No newline at end of file diff --git a/src/cdl.py 
b/src/cdl.py new file mode 100644 index 0000000..99f5af1 --- /dev/null +++ b/src/cdl.py @@ -0,0 +1,248 @@ +import numpy as np +import logging +from sklearn.metrics import accuracy_score, log_loss +from keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout, Add, Concatenate, Softmax, Activation, LeakyReLU +from keras.layers.noise import GaussianNoise +from keras.layers.normalization import BatchNormalization +from keras.initializers import RandomNormal, RandomUniform +from keras.regularizers import l2 +from keras.callbacks import EarlyStopping, TensorBoard +from keras.engine.topology import Layer +from keras.models import Model +from keras import optimizers +from keras import backend as K + +class CDL: + def __init__( + self, + users, + items, + user_layer_dim=[13, 10, 7, 10, 13], + item_layer_dim=[11, 7, 11], + lamda_w=0.1, + lamda_u=0.1, + lamda_v=0.1, + encoder_noise=0.1, + dropout_rate=0.1, + activation='relu', + model=None): + ''' + item_layer_dim = a list of integer indicating the embedding dimension of autoencoder + user_layer_dim = a list of integer indicating the embedding dimension of autoencoder + items = item feature matrix with shape (# of item, # of item features) + users = user feature matrix with shape (# of user, # of user features) + ''' + assert(len(user_layer_dim) >= 3) + assert(len(item_layer_dim) >= 3) + assert(user_layer_dim[len(user_layer_dim) // 2] == item_layer_dim[len(item_layer_dim) // 2]) + + self.items = items + self.users = users + + if model is not None: + self.cdl_model = model + return + + item_layer_dim = np.array(item_layer_dim) + user_layer_dim = np.array(user_layer_dim) + item_feature_dim = item_layer_dim[0] + user_feature_dim = user_layer_dim[0] + item_embedding_dim = item_layer_dim[item_layer_dim.size // 2] + user_embedding_dim = user_layer_dim[user_layer_dim.size // 2] + num_user = self.users.shape[0] + 1 + num_item = self.items.shape[0] + 1 + + # User Autoencoder: encoder + # logging.info("User Autoencoder with 
dim [%s]." % ", ".join(str(v) for v in user_layer_dim)) + encode_input_dim = user_layer_dim[0] + user_encode_input_layer = Input(shape=(encode_input_dim,), name="user_encode_input_layer") + user_encode_output_layer = GaussianNoise(stddev=encoder_noise)(user_encode_input_layer) + for i in range(1, user_layer_dim.size // 2 + 1): + user_encode_output_layer = Dense(user_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(user_encode_output_layer) + if i == user_layer_dim.size // 2: + self.user_encode_model = Model(inputs=user_encode_input_layer, outputs=user_encode_output_layer, name="user_encoder") + user_encode_output_layer = Dropout(dropout_rate)(user_encode_output_layer) + # user_encode_output_layer = BatchNormalization()(user_encode_output_layer) + user_encode_output_layer = Activation(activation)(user_encode_output_layer) + # User Autoencoder: decoder + decode_input_dim = user_layer_dim[user_layer_dim.size // 2] + user_decode_input_layer = Input(shape=(decode_input_dim,), name="user_decode_input_layer") + user_decode_output_layer = GaussianNoise(stddev=encoder_noise)(user_decode_input_layer) + for i in range(user_layer_dim.size // 2 + 1, user_layer_dim.size): + user_decode_output_layer = Dense(user_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(user_decode_output_layer) + if i == user_layer_dim.size - 1: + # user_decode_output_layer = BatchNormalization()(user_decode_output_layer) + user_decode_output_layer = Activation('sigmoid')(user_decode_output_layer) + self.user_decode_model = Model(inputs=user_decode_input_layer, outputs=user_decode_output_layer, name="user_decoder") + else: + user_decode_output_layer = Dropout(dropout_rate)(user_decode_output_layer) + # user_decode_output_layer = BatchNormalization()(user_decode_output_layer) + user_decode_output_layer = 
Activation(activation)(user_decode_output_layer) + # User Autoencoder + self.user_ae = Model(inputs=self.user_encode_model.inputs, outputs=self.user_decode_model(self.user_encode_model.outputs), name="user_autoencoder") + + # Item Autoencoder: encoder + # logging.info("Item Autoencoder with dim [%s]." % ", ".join(str(v) for v in item_layer_dim)) + encode_input_dim = item_layer_dim[0] + item_encode_input_layer = Input(shape=(encode_input_dim,), name="item_encode_input_layer") + item_encode_output_layer = GaussianNoise(stddev=encoder_noise)(item_encode_input_layer) + for i in range(1, item_layer_dim.size // 2 + 1): + item_encode_output_layer = Dense(item_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(item_encode_output_layer) + if i == item_layer_dim.size // 2: + self.item_encode_model = Model(inputs=item_encode_input_layer, outputs=item_encode_output_layer, name="item_encoder") + item_encode_output_layer = Dropout(dropout_rate)(item_encode_output_layer) + # item_encode_output_layer = BatchNormalization()(item_encode_output_layer) + item_encode_output_layer = Activation(activation)(item_encode_output_layer) + # Item Autoencoder: decoder + decode_input_dim = item_layer_dim[item_layer_dim.size // 2] + item_decode_input_layer = Input(shape=(decode_input_dim,), name="item_decode_input_layer") + item_decode_output_layer = GaussianNoise(stddev=encoder_noise)(item_decode_input_layer) + for i in range(item_layer_dim.size // 2 + 1, item_layer_dim.size): + item_decode_output_layer = Dense(item_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(item_decode_output_layer) + if i == item_layer_dim.size - 1: + # item_decode_output_layer = BatchNormalization()(item_decode_output_layer) + item_decode_output_layer = Activation('sigmoid')(item_decode_output_layer) + self.item_decode_model = 
Model(inputs=item_decode_input_layer, outputs=item_decode_output_layer, name="item_decoder") + else: + item_decode_output_layer = Dropout(dropout_rate)(item_decode_output_layer) + # item_decode_output_layer = BatchNormalization()(item_decode_output_layer) + item_decode_output_layer = Activation(activation)(item_decode_output_layer) + # Item Autoencoder + self.item_ae = Model(inputs=self.item_encode_model.inputs, outputs=self.item_decode_model(self.item_encode_model.outputs), name="item_autoencoder") + + # User embedding + user_input_layer = Input(shape=(1,), name='user_input') + user_feature_input_layer = Input(shape=(user_feature_dim,), name="user_feature_input_layer") + user_offset_Vector = Embedding(input_dim=num_user, output_dim=user_embedding_dim, input_length=1, name='user_offset_vector', embeddings_regularizer=l2(lamda_u), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))(user_input_layer) + user_offset_Vector = Flatten(name='user_flatten')(user_offset_Vector) + user_encoded_layer = self.user_encode_model(user_feature_input_layer) + user_decoded_layer = self.user_decode_model(user_encoded_layer) + user_embedding_layer = Add()([user_encoded_layer, user_offset_Vector]) + + item_shared_embedding_layer = Embedding(input_dim=num_item, output_dim=item_embedding_dim, input_length=1, name='item_shared_offset_vector', embeddings_regularizer=l2(lamda_v), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05)) + # Item1 embedding + item1_input_layer = Input(shape=(1,), name='item1_input') + item1_feature_input_layer = Input(shape=(item_feature_dim,), name="item1_feature_input_layer") + item1_offset_vector = item_shared_embedding_layer(item1_input_layer) + item1_offset_vector = Flatten(name='item1_flatten')(item1_offset_vector) + item1_encoded_layer = self.item_encode_model(item1_feature_input_layer) + item1_decoded_layer = self.item_decode_model(item1_encoded_layer) + item1_embedding_layer = Add()([item1_encoded_layer, item1_offset_vector]) + + # Item2 
embedding + item2_input_layer = Input(shape=(1,), name='item2_input') + item2_feature_input_layer = Input(shape=(item_feature_dim,), name="item2_feature_input_layer") + item2_offset_vector = item_shared_embedding_layer(item2_input_layer) + item2_offset_vector = Flatten(name='item2_flatten')(item2_offset_vector) + item2_encoded_layer = self.item_encode_model(item2_feature_input_layer) + item2_decoded_layer = self.item_decode_model(item2_encoded_layer) + item2_embedding_layer = Add()([item2_encoded_layer, item2_offset_vector]) + + # rating prediction + rating1 = Dot(axes = -1, name='dot1_layer')([user_embedding_layer, item1_embedding_layer]) + rating2 = Dot(axes = -1, name='dot2_layer')([user_embedding_layer, item2_embedding_layer]) + output_layer = Softmax(name="output_softmax")(BatchNormalization()(Concatenate()([rating1, rating2]))) + + # Create Model + self.cdl_model = Model( + inputs=[ + user_input_layer, + item1_input_layer, + item2_input_layer, + user_feature_input_layer, + item1_feature_input_layer, + item2_feature_input_layer], + outputs=[ + output_layer, + user_decoded_layer, + item1_decoded_layer, + item2_decoded_layer], + name="CDL") + + # self.cdl_model.summary() + + def pretrain(self, + user_optimizer="adam", user_batch_size=5, user_epochs=50, + item_optimizer="adam", item_batch_size=2, item_epochs=50, + callbacks=[], + verbose=1): + self.user_ae.compile(loss='binary_crossentropy', optimizer=user_optimizer) + self.user_ae.fit(x=self.users, y=self.users, batch_size=user_batch_size, epochs=user_epochs, callbacks=callbacks, verbose=verbose) + + self.item_ae.compile(loss='binary_crossentropy', optimizer=item_optimizer) + self.item_ae.fit(x=self.items, y=self.items, batch_size=item_batch_size, epochs=item_epochs, callbacks=callbacks, verbose=verbose) + + def fit(self, + X_train, + Y_train, + X_test=None, + Y_test=None, + lamda_n=0.1, + lamda_m=0.1, + lamda_c=10, + optimizer="adam", + callbacks=[], + batch_size=64, + epochs=10): + ''' + Fine-tuning with 
rating prediction + ''' + # callbacks.append(TensorBoard(write_images=True)) + self.cdl_model.compile( + optimizer=optimizer, + loss=['categorical_crossentropy', 'binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'], + loss_weights=[lamda_c, lamda_m, lamda_n, lamda_n]) + # metrics=['categorical_accuracy', 'binary_accuracy', 'binary_accuracy', 'binary_accuracy']) + + train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label = self.matrix2input(X_train, Y_train) + + if X_test is not None: + test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test) + validation_data = ( + [test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature], + [test_label, test_user_feature, test_item1_feature, test_item2_feature], + ) + else: + validation_data = None + + model_history = self.cdl_model.fit( + x=[train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature], + y=[train_label, train_user_feature, train_item1_feature, train_item2_feature], + verbose=1, + validation_data=validation_data, + epochs=epochs, + batch_size=batch_size, + callbacks=callbacks) + return model_history + + def get_autoencoder_loss(self): + user_entropy = log_loss(self.users, self.user_ae.predict(self.users)) + item_entropy = log_loss(self.items, self.item_ae.predict(self.items)) + return {'user_entropy': user_entropy, 'item_entropy': item_entropy} + + def matrix2input(self, X_train, Y_train): + train_user = X_train[:, 0].reshape(-1, 1).astype(int) + train_item1 = X_train[:, 1].reshape(-1, 1).astype(int) + train_item2 = X_train[:, 2].reshape(-1, 1).astype(int) + train_user_feature = self.users[train_user.flatten()] + train_item1_feature = self.items[train_item1.flatten()] + train_item2_feature = self.items[train_item2.flatten()] + if Y_train is not None: + train_label = Y_train.reshape(-1, 
1) + train_label = np.concatenate((train_label, 1 - train_label), axis=1) + else: + train_label = None + return train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label + + def get_accuracy(self, X_test, Y_test): + test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test) + pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0]) + return accuracy_score(test_label[:,0], 1 - np.argmax(pred_out, axis=1)) + + def predict(self, X_test): + test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, None) + pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0]) + return 1 - np.argmax(pred_out, axis=1) + + def get_model(self): + return self.cdl_model \ No newline at end of file diff --git a/src/cdl_dense.py b/src/cdl_dense.py new file mode 100644 index 0000000..d5f7512 --- /dev/null +++ b/src/cdl_dense.py @@ -0,0 +1,248 @@ +import numpy as np +import logging +from sklearn.metrics import accuracy_score, log_loss +from keras.layers import Input, Embedding, Dot, Flatten, Dense, Dropout, Add, Concatenate, Softmax, Activation, LeakyReLU +from keras.layers.noise import GaussianNoise +from keras.layers.normalization import BatchNormalization +from keras.initializers import RandomNormal, RandomUniform +from keras.regularizers import l2 +from keras.callbacks import EarlyStopping, TensorBoard +from keras.engine.topology import Layer +from keras.models import Model +from keras import optimizers +from keras import backend as K + +class CDL: + def __init__( + self, + users, + items, + user_layer_dim=[13, 10, 7, 10, 13], + item_layer_dim=[11, 7, 11], + lamda_w=0.1, + lamda_u=0.1, + lamda_v=0.1, + 
encoder_noise=0.1, + dropout_rate=0.1, + activation=Activation('relu'), + model=None): + ''' + item_layer_dim = a list of integer indicating the embedding dimension of autoencoder + user_layer_dim = a list of integer indicating the embedding dimension of autoencoder + items = item feature matrix with shape (# of item, # of item features) + users = user feature matrix with shape (# of user, # of user features) + ''' + assert(len(user_layer_dim) >= 3) + assert(len(item_layer_dim) >= 3) + assert(user_layer_dim[len(user_layer_dim) // 2] == item_layer_dim[len(item_layer_dim) // 2]) + + self.items = items + self.users = users + + if model is not None: + self.cdl_model = model + return + + item_layer_dim = np.array(item_layer_dim) + user_layer_dim = np.array(user_layer_dim) + item_feature_dim = item_layer_dim[0] + user_feature_dim = user_layer_dim[0] + item_embedding_dim = item_layer_dim[item_layer_dim.size // 2] + user_embedding_dim = user_layer_dim[user_layer_dim.size // 2] + num_user = self.users.shape[0] + 1 + num_item = self.items.shape[0] + 1 + + # User Autoencoder: encoder + # logging.info("User Autoencoder with dim [%s]." 
% ", ".join(str(v) for v in user_layer_dim)) + encode_input_dim = user_layer_dim[0] + user_encode_input_layer = Input(shape=(encode_input_dim,), name="user_encode_input_layer") + user_encode_output_layer = GaussianNoise(stddev=encoder_noise)(user_encode_input_layer) + for i in range(1, user_layer_dim.size // 2 + 1): + user_encode_output_layer = Dense(user_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(user_encode_output_layer) + if i == user_layer_dim.size // 2: + self.user_encode_model = Model(inputs=user_encode_input_layer, outputs=user_encode_output_layer, name="user_encoder") + user_encode_output_layer = Dropout(dropout_rate)(user_encode_output_layer) + user_encode_output_layer = BatchNormalization()(user_encode_output_layer) + user_encode_output_layer = activation(user_encode_output_layer) + # User Autoencoder: decoder + decode_input_dim = user_layer_dim[user_layer_dim.size // 2] + user_decode_input_layer = Input(shape=(decode_input_dim,), name="user_decode_input_layer") + user_decode_output_layer = GaussianNoise(stddev=encoder_noise)(user_decode_input_layer) + for i in range(user_layer_dim.size // 2 + 1, user_layer_dim.size): + user_decode_output_layer = Dense(user_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(user_decode_output_layer) + if i == user_layer_dim.size - 1: + user_decode_output_layer = BatchNormalization()(user_decode_output_layer) + user_decode_output_layer = Activation('sigmoid')(user_decode_output_layer) + self.user_decode_model = Model(inputs=user_decode_input_layer, outputs=user_decode_output_layer, name="user_decoder") + else: + user_decode_output_layer = Dropout(dropout_rate)(user_decode_output_layer) + user_decode_output_layer = BatchNormalization()(user_decode_output_layer) + user_decode_output_layer = activation(user_decode_output_layer) + # User Autoencoder + 
self.user_ae = Model(inputs=self.user_encode_model.inputs, outputs=self.user_decode_model(self.user_encode_model.outputs), name="user_autoencoder") + + # Item Autoencoder: encoder + # logging.info("Item Autoencoder with dim [%s]." % ", ".join(str(v) for v in item_layer_dim)) + encode_input_dim = item_layer_dim[0] + item_encode_input_layer = Input(shape=(encode_input_dim,), name="item_encode_input_layer") + item_encode_output_layer = GaussianNoise(stddev=encoder_noise)(item_encode_input_layer) + for i in range(1, item_layer_dim.size // 2 + 1): + item_encode_output_layer = Dense(item_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(item_encode_output_layer) + if i == item_layer_dim.size // 2: + self.item_encode_model = Model(inputs=item_encode_input_layer, outputs=item_encode_output_layer, name="item_encoder") + item_encode_output_layer = Dropout(dropout_rate)(item_encode_output_layer) + item_encode_output_layer = BatchNormalization()(item_encode_output_layer) + item_encode_output_layer = activation(item_encode_output_layer) + # Item Autoencoder: decoder + decode_input_dim = item_layer_dim[item_layer_dim.size // 2] + item_decode_input_layer = Input(shape=(decode_input_dim,), name="item_decode_input_layer") + item_decode_output_layer = GaussianNoise(stddev=encoder_noise)(item_decode_input_layer) + for i in range(item_layer_dim.size // 2 + 1, item_layer_dim.size): + item_decode_output_layer = Dense(item_layer_dim[i], kernel_initializer=RandomNormal(mean=0.0, stddev=0.05), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(item_decode_output_layer) + if i == item_layer_dim.size - 1: + item_decode_output_layer = BatchNormalization()(item_decode_output_layer) + item_decode_output_layer = Activation('sigmoid')(item_decode_output_layer) + self.item_decode_model = Model(inputs=item_decode_input_layer, outputs=item_decode_output_layer, name="item_decoder") + else: + 
item_decode_output_layer = Dropout(dropout_rate)(item_decode_output_layer) + item_decode_output_layer = BatchNormalization()(item_decode_output_layer) + item_decode_output_layer = activation(item_decode_output_layer) + # Item Autoencoder + self.item_ae = Model(inputs=self.item_encode_model.inputs, outputs=self.item_decode_model(self.item_encode_model.outputs), name="item_autoencoder") + + # User embedding + user_input_layer = Input(shape=(1,), name='user_input') + user_feature_input_layer = Input(shape=(user_feature_dim,), name="user_feature_input_layer") + user_offset_Vector = Embedding(input_dim=num_user, output_dim=user_embedding_dim, input_length=1, name='user_offset_vector', embeddings_regularizer=l2(lamda_u), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))(user_input_layer) + user_offset_Vector = Flatten(name='user_flatten')(user_offset_Vector) + user_encoded_layer = self.user_encode_model(user_feature_input_layer) + user_decoded_layer = self.user_decode_model(user_encoded_layer) + user_embedding_layer = Add()([user_encoded_layer, user_offset_Vector]) + + item_shared_embedding_layer = Embedding(input_dim=num_item, output_dim=item_embedding_dim, input_length=1, name='item_shared_offset_vector', embeddings_regularizer=l2(lamda_v), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05)) + # Item1 embedding + item1_input_layer = Input(shape=(1,), name='item1_input') + item1_feature_input_layer = Input(shape=(item_feature_dim,), name="item1_feature_input_layer") + item1_offset_vector = item_shared_embedding_layer(item1_input_layer) + item1_offset_vector = Flatten(name='item1_flatten')(item1_offset_vector) + item1_encoded_layer = self.item_encode_model(item1_feature_input_layer) + item1_decoded_layer = self.item_decode_model(item1_encoded_layer) + item1_embedding_layer = Add()([item1_encoded_layer, item1_offset_vector]) + + # Item2 embedding + item2_input_layer = Input(shape=(1,), name='item2_input') + item2_feature_input_layer = 
Input(shape=(item_feature_dim,), name="item2_feature_input_layer") + item2_offset_vector = item_shared_embedding_layer(item2_input_layer) + item2_offset_vector = Flatten(name='item2_flatten')(item2_offset_vector) + item2_encoded_layer = self.item_encode_model(item2_feature_input_layer) + item2_decoded_layer = self.item_decode_model(item2_encoded_layer) + item2_embedding_layer = Add()([item2_encoded_layer, item2_offset_vector]) + + # rating prediction + rating1 = Dot(axes = -1, name='dot1_layer')([user_embedding_layer, item1_embedding_layer]) + rating2 = Dot(axes = -1, name='dot2_layer')([user_embedding_layer, item2_embedding_layer]) + output_layer = Softmax(name="output_softmax")(BatchNormalization()(Concatenate()([rating1, rating2]))) + + # Create Model + self.cdl_model = Model( + inputs=[ + user_input_layer, + item1_input_layer, + item2_input_layer, + user_feature_input_layer, + item1_feature_input_layer, + item2_feature_input_layer], + outputs=[ + output_layer, + user_decoded_layer, + item1_decoded_layer, + item2_decoded_layer], + name="CDL") + + # self.cdl_model.summary() + + def pretrain(self, + user_optimizer="adam", user_batch_size=5, user_epochs=50, + item_optimizer="adam", item_batch_size=2, item_epochs=50, + callbacks=[], + verbose=1): + self.user_ae.compile(loss='binary_crossentropy', optimizer=user_optimizer) + self.user_ae.fit(x=self.users, y=self.users, batch_size=user_batch_size, epochs=user_epochs, callbacks=callbacks, verbose=verbose) + + self.item_ae.compile(loss='binary_crossentropy', optimizer=item_optimizer) + self.item_ae.fit(x=self.items, y=self.items, batch_size=item_batch_size, epochs=item_epochs, callbacks=callbacks, verbose=verbose) + + def fit(self, + X_train, + Y_train, + X_test=None, + Y_test=None, + lamda_n=0.1, + lamda_m=0.1, + lamda_c=10, + optimizer="adam", + callbacks=[], + batch_size=64, + epochs=10): + ''' + Fine-tuning with rating prediction + ''' + # callbacks.append(TensorBoard(write_images=True)) + self.cdl_model.compile( + 
optimizer=optimizer, + loss=['categorical_crossentropy', 'binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'], + loss_weights=[lamda_c, lamda_m, lamda_n, lamda_n]) + # metrics=['categorical_accuracy', 'binary_accuracy', 'binary_accuracy', 'binary_accuracy']) + + train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label = self.matrix2input(X_train, Y_train) + + if X_test is not None: + test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test) + validation_data = ( + [test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature], + [test_label, test_user_feature, test_item1_feature, test_item2_feature], + ) + else: + validation_data = None + + model_history = self.cdl_model.fit( + x=[train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature], + y=[train_label, train_user_feature, train_item1_feature, train_item2_feature], + verbose=1, + validation_data=validation_data, + epochs=epochs, + batch_size=batch_size, + callbacks=callbacks) + return model_history + + def get_autoencoder_loss(self): + user_entropy = log_loss(self.users, self.user_ae.predict(self.users)) + item_entropy = log_loss(self.items, self.item_ae.predict(self.items)) + return {'user_entropy': user_entropy, 'item_entropy': item_entropy} + + def matrix2input(self, X_train, Y_train): + train_user = X_train[:, 0].reshape(-1, 1).astype(int) + train_item1 = X_train[:, 1].reshape(-1, 1).astype(int) + train_item2 = X_train[:, 2].reshape(-1, 1).astype(int) + train_user_feature = self.users[train_user.flatten()] + train_item1_feature = self.items[train_item1.flatten()] + train_item2_feature = self.items[train_item2.flatten()] + if Y_train is not None: + train_label = Y_train.reshape(-1, 1) + train_label = np.concatenate((train_label, 1 - train_label), axis=1) + else: + train_label = None + 
class CDL:
    """Collaborative Deep Learning for pairwise item preference (two-score head).

    Two stacked denoising autoencoders embed user and item feature vectors
    into a shared latent space; per-id Embedding tables add a learned offset
    on top of the encoded features.  A shared feed-forward scorer rates
    (user, item1) and (user, item2) independently, and a softmax over the two
    scores predicts which item the user prefers.  The joint model is trained
    on the preference label plus the three reconstruction targets.
    """

    def __init__(
            self,
            users,
            items,
            user_layer_dim=None,
            item_layer_dim=None,
            predict_layer_dim=None,
            lamda_w=0.1,
            lamda_u=0.1,
            lamda_v=0.1,
            encoder_noise=0.1,
            dropout_rate=0.1,
            activation=None,
            model=None):
        """Build the CDL graph, or wrap a pre-built Keras ``model``.

        users  = user feature matrix with shape (# of user, # of user features)
        items  = item feature matrix with shape (# of item, # of item features)
        user_layer_dim    = autoencoder layer sizes for users (default [13, 10, 7, 10, 13])
        item_layer_dim    = autoencoder layer sizes for items (default [11, 7, 11])
        predict_layer_dim = layer sizes of the rating head (default [10, 1])
        lamda_w / lamda_u / lamda_v = l2 strengths for dense weights, user
            offset embeddings and item offset embeddings.
        encoder_noise / dropout_rate = denoising-autoencoder regularization.
        activation = Keras activation layer for hidden layers (default relu).
        """
        # Resolve defaults per call.  The previous signature used mutable list
        # defaults and a single shared Activation('relu') instance, all created
        # once at import time and shared by every CDL instance.
        if user_layer_dim is None:
            user_layer_dim = [13, 10, 7, 10, 13]
        if item_layer_dim is None:
            item_layer_dim = [11, 7, 11]
        if predict_layer_dim is None:
            predict_layer_dim = [10, 1]
        if activation is None:
            activation = Activation('relu')

        assert len(user_layer_dim) >= 3
        assert len(item_layer_dim) >= 3
        # The two bottlenecks must agree so user and item embeddings live in
        # the same latent space.
        assert user_layer_dim[len(user_layer_dim) // 2] == item_layer_dim[len(item_layer_dim) // 2]

        self.items = items
        self.users = users

        if model is not None:
            # Reuse an externally built / loaded Keras model as-is.
            self.cdl_model = model
            return

        item_layer_dim = np.array(item_layer_dim)
        user_layer_dim = np.array(user_layer_dim)
        item_feature_dim = item_layer_dim[0]
        user_feature_dim = user_layer_dim[0]
        item_embedding_dim = item_layer_dim[item_layer_dim.size // 2]
        user_embedding_dim = user_layer_dim[user_layer_dim.size // 2]
        # +1 leaves headroom for one extra id; presumably ids are 0-based row
        # indices into the feature matrices — TODO confirm against callers.
        num_user = self.users.shape[0] + 1
        num_item = self.items.shape[0] + 1
        predict_layer_dim = np.array(predict_layer_dim)

        # ---- Autoencoders (user/item construction was duplicated; now shared)
        self.user_encode_model, self.user_decode_model, self.user_ae = \
            self._build_autoencoder('user', user_layer_dim, lamda_w,
                                    encoder_noise, dropout_rate, activation)
        self.item_encode_model, self.item_decode_model, self.item_ae = \
            self._build_autoencoder('item', item_layer_dim, lamda_w,
                                    encoder_noise, dropout_rate, activation)

        # ---- User embedding: encoded features + learned per-id offset ----
        user_input_layer = Input(shape=(1,), name='user_input')
        user_feature_input_layer = Input(shape=(user_feature_dim,), name="user_feature_input_layer")
        user_offset_vector = Embedding(input_dim=num_user, output_dim=user_embedding_dim, input_length=1, name='user_offset_vector', embeddings_regularizer=l2(lamda_u), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))(user_input_layer)
        user_offset_vector = Flatten(name='user_flatten')(user_offset_vector)
        user_encoded_layer = self.user_encode_model(user_feature_input_layer)
        user_decoded_layer = self.user_decode_model(user_encoded_layer)
        user_embedding_layer = Add()([user_encoded_layer, user_offset_vector])

        # item1 and item2 share one offset Embedding table.
        item_shared_embedding_layer = Embedding(input_dim=num_item, output_dim=item_embedding_dim, input_length=1, name='item_shared_offset_vector', embeddings_regularizer=l2(lamda_v), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))

        # ---- Item1 embedding ----
        item1_input_layer = Input(shape=(1,), name='item1_input')
        item1_feature_input_layer = Input(shape=(item_feature_dim,), name="item1_feature_input_layer")
        item1_offset_vector = item_shared_embedding_layer(item1_input_layer)
        item1_offset_vector = Flatten(name='item1_flatten')(item1_offset_vector)
        item1_encoded_layer = self.item_encode_model(item1_feature_input_layer)
        item1_decoded_layer = self.item_decode_model(item1_encoded_layer)
        item1_embedding_layer = Add()([item1_encoded_layer, item1_offset_vector])

        # ---- Item2 embedding ----
        item2_input_layer = Input(shape=(1,), name='item2_input')
        item2_feature_input_layer = Input(shape=(item_feature_dim,), name="item2_feature_input_layer")
        item2_offset_vector = item_shared_embedding_layer(item2_input_layer)
        item2_offset_vector = Flatten(name='item2_flatten')(item2_offset_vector)
        item2_encoded_layer = self.item_encode_model(item2_feature_input_layer)
        item2_decoded_layer = self.item_decode_model(item2_encoded_layer)
        item2_embedding_layer = Add()([item2_encoded_layer, item2_offset_vector])

        # ---- Rating head, shared between the two candidate items ----
        predict_input_layer = Input(shape=(user_embedding_dim + item_embedding_dim,), name='predict_input_layer')
        hidden = predict_input_layer
        for i, layer_dim in enumerate(predict_layer_dim):
            hidden = Dense(layer_dim, kernel_initializer=RandomNormal(mean=0.0, stddev=0.1), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(hidden)
            hidden = BatchNormalization()(hidden)
            if i != predict_layer_dim.size - 1:
                hidden = activation(hidden)
        predict_model = Model(inputs=predict_input_layer, outputs=hidden, name="predict_model")

        # Score each candidate with the shared head, then softmax the pair of
        # scores into [P(item1 preferred), P(item2 preferred)].
        rating1 = predict_model(Concatenate()([user_embedding_layer, item1_embedding_layer]))
        rating2 = predict_model(Concatenate()([user_embedding_layer, item2_embedding_layer]))
        output_layer = Softmax()(Concatenate()([rating1, rating2]))

        # Joint model: preference head + three reconstruction heads.
        self.cdl_model = Model(
            inputs=[
                user_input_layer,
                item1_input_layer,
                item2_input_layer,
                user_feature_input_layer,
                item1_feature_input_layer,
                item2_feature_input_layer],
            outputs=[
                output_layer,
                user_decoded_layer,
                item1_decoded_layer,
                item2_decoded_layer],
            name="CDL")

    def _build_autoencoder(self, prefix, layer_dim, lamda_w, encoder_noise, dropout_rate, activation):
        """Build one stacked denoising autoencoder.

        prefix    = 'user' or 'item'; used to keep the original layer names.
        layer_dim = np.array of layer sizes, symmetric around the bottleneck.
        Returns (encoder Model, decoder Model, end-to-end autoencoder Model).
        """
        half = layer_dim.size // 2

        def dense_kwargs():
            # Fresh initializer/regularizer objects per layer, as the original
            # inline construction did.
            return dict(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                        kernel_regularizer=l2(lamda_w),
                        bias_regularizer=l2(lamda_w))

        # Encoder: noisy input -> Dense stacks down to the bottleneck.
        encode_input = Input(shape=(layer_dim[0],), name="%s_encode_input_layer" % prefix)
        x = GaussianNoise(stddev=encoder_noise)(encode_input)
        encode_model = None
        for i in range(1, half + 1):
            x = Dense(layer_dim[i], **dense_kwargs())(x)
            if i == half:
                # Snapshot the raw (pre-dropout/BN/activation) bottleneck as
                # the embedding model; the ops below continue the AE path only.
                encode_model = Model(inputs=encode_input, outputs=x, name="%s_encoder" % prefix)
            x = Dropout(dropout_rate)(x)
            x = BatchNormalization()(x)
            x = activation(x)

        # Decoder: bottleneck -> Dense stacks back up; sigmoid on the output
        # so reconstructions match binary_crossentropy training.
        decode_input = Input(shape=(layer_dim[half],), name="%s_decode_input_layer" % prefix)
        y = GaussianNoise(stddev=encoder_noise)(decode_input)
        decode_model = None
        for i in range(half + 1, layer_dim.size):
            y = Dense(layer_dim[i], **dense_kwargs())(y)
            if i == layer_dim.size - 1:
                y = BatchNormalization()(y)
                y = Activation('sigmoid')(y)
                decode_model = Model(inputs=decode_input, outputs=y, name="%s_decoder" % prefix)
            else:
                y = Dropout(dropout_rate)(y)
                y = BatchNormalization()(y)
                y = activation(y)

        autoencoder = Model(inputs=encode_model.inputs,
                            outputs=decode_model(encode_model.outputs),
                            name="%s_autoencoder" % prefix)
        return encode_model, decode_model, autoencoder

    def pretrain_autoencoder(self,
                             user_optimizer="adam", user_batch_size=5, user_epochs=50,
                             item_optimizer="adam", item_batch_size=2, item_epochs=50,
                             callbacks=None,
                             verbose=1):
        """Train each autoencoder to reconstruct its own feature matrix.

        Uses binary cross-entropy, so features are presumably scaled to
        [0, 1] — TODO confirm against the data pipeline.
        """
        callbacks = [] if callbacks is None else callbacks  # no mutable default
        # Pretrain user autoencoder.
        self.user_ae.compile(loss='binary_crossentropy', optimizer=user_optimizer)
        self.user_ae.fit(x=self.users, y=self.users, batch_size=user_batch_size, epochs=user_epochs, callbacks=callbacks, verbose=verbose)
        # Pretrain item autoencoder.
        self.item_ae.compile(loss='binary_crossentropy', optimizer=item_optimizer)
        self.item_ae.fit(x=self.items, y=self.items, batch_size=item_batch_size, epochs=item_epochs, callbacks=callbacks, verbose=verbose)

    def fit(self,
            X_train,
            Y_train,
            X_test=None,
            Y_test=None,
            lamda_n=1,   # item reconstruction loss weight
            lamda_m=1,   # user reconstruction loss weight
            lamda_c=10,  # preference prediction loss weight
            optimizer="adam",
            callbacks=None,
            batch_size=64,
            epochs=10):
        """Jointly fine-tune the preference head and both autoencoders.

        X_* rows are (user_id, item1_id, item2_id) triples; Y_* is 1 when
        item1 is preferred, 0 otherwise.  Returns the Keras History.
        """
        callbacks = [] if callbacks is None else callbacks  # no mutable default
        self.cdl_model.compile(
            optimizer=optimizer,
            loss=['categorical_crossentropy', 'binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'],
            loss_weights=[lamda_c, lamda_m, lamda_n, lamda_n])

        train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label = self.matrix2input(X_train, Y_train)

        if X_test is not None:
            test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test)
            validation_data = (
                [test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature],
                [test_label, test_user_feature, test_item1_feature, test_item2_feature],
            )
        else:
            validation_data = None

        model_history = self.cdl_model.fit(
            x=[train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature],
            y=[train_label, train_user_feature, train_item1_feature, train_item2_feature],
            verbose=1,
            validation_data=validation_data,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks)
        return model_history

    def matrix2input(self, X_train, Y_train):
        """Split (user, item1, item2) triples into the model's input arrays.

        Returns the three id columns, the gathered feature rows for each id,
        and a two-column label [y, 1-y] (None when Y_train is None).
        """
        train_user = X_train[:, 0].reshape(-1, 1).astype(int)
        train_item1 = X_train[:, 1].reshape(-1, 1).astype(int)
        train_item2 = X_train[:, 2].reshape(-1, 1).astype(int)
        train_user_feature = self.users[train_user.flatten()]
        train_item1_feature = self.items[train_item1.flatten()]
        train_item2_feature = self.items[train_item2.flatten()]
        if Y_train is not None:
            train_label = Y_train.reshape(-1, 1)
            # Two-column target to match the softmax pair output.
            train_label = np.concatenate((train_label, 1 - train_label), axis=1)
        else:
            train_label = None
        return train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label

    def get_autoencoder_loss(self):
        """Return the reconstruction log-loss of both autoencoders as a dict."""
        user_entropy = log_loss(self.users, self.user_ae.predict(self.users))
        item_entropy = log_loss(self.items, self.item_ae.predict(self.items))
        return {'user_entropy': user_entropy, 'item_entropy': item_entropy}

    def get_accuracy(self, X_test, Y_test):
        """Return preference accuracy on (X_test, Y_test)."""
        test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test)
        pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0])
        # Output column 0 is P(item1 preferred): argmax 0 -> label 1.
        return accuracy_score(test_label[:, 0], 1 - np.argmax(pred_out, axis=1))

    def predict(self, X_test):
        """Return 1 where item1 is predicted preferred over item2, else 0."""
        test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, None)
        pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0])
        return 1 - np.argmax(pred_out, axis=1)

    def get_model(self):
        """Return the underlying joint Keras model."""
        return self.cdl_model
class CDL:
    """Collaborative Deep Learning for pairwise item preference (joint head).

    Two stacked denoising autoencoders embed user and item feature vectors
    into a shared latent space; per-id Embedding tables add a learned offset
    on top of the encoded features.  Unlike the twin-score variant, a single
    MLP consumes [user ⊕ item1 ⊕ item2] and emits the preference distribution
    directly via a softmax on its last layer.
    """

    def __init__(
            self,
            users,
            items,
            user_layer_dim=None,
            item_layer_dim=None,
            predict_layer_dim=None,
            lamda_w=0.1,
            lamda_u=0.1,
            lamda_v=0.1,
            encoder_noise=0.1,
            dropout_rate=0.1,
            activation=None,
            model=None):
        """Build the CDL graph, or wrap a pre-built Keras ``model``.

        users  = user feature matrix with shape (# of user, # of user features)
        items  = item feature matrix with shape (# of item, # of item features)
        user_layer_dim    = autoencoder layer sizes for users (default [13, 10, 7, 10, 13])
        item_layer_dim    = autoencoder layer sizes for items (default [11, 7, 11])
        predict_layer_dim = layer sizes of the preference head (default [10, 1])
        lamda_w / lamda_u / lamda_v = l2 strengths for dense weights, user
            offset embeddings and item offset embeddings.
        encoder_noise / dropout_rate = denoising-autoencoder regularization.
        activation = Keras activation layer for hidden layers (default relu).
        """
        # Resolve defaults per call.  The previous signature used mutable list
        # defaults and a single shared Activation('relu') instance, all created
        # once at import time and shared by every CDL instance.
        if user_layer_dim is None:
            user_layer_dim = [13, 10, 7, 10, 13]
        if item_layer_dim is None:
            item_layer_dim = [11, 7, 11]
        if predict_layer_dim is None:
            predict_layer_dim = [10, 1]
        if activation is None:
            activation = Activation('relu')

        assert len(user_layer_dim) >= 3
        assert len(item_layer_dim) >= 3
        # The two bottlenecks must agree so user and item embeddings live in
        # the same latent space.
        assert user_layer_dim[len(user_layer_dim) // 2] == item_layer_dim[len(item_layer_dim) // 2]

        self.items = items
        self.users = users

        if model is not None:
            # Reuse an externally built / loaded Keras model as-is.
            self.cdl_model = model
            return

        item_layer_dim = np.array(item_layer_dim)
        user_layer_dim = np.array(user_layer_dim)
        item_feature_dim = item_layer_dim[0]
        user_feature_dim = user_layer_dim[0]
        item_embedding_dim = item_layer_dim[item_layer_dim.size // 2]
        user_embedding_dim = user_layer_dim[user_layer_dim.size // 2]
        # +1 leaves headroom for one extra id; presumably ids are 0-based row
        # indices into the feature matrices — TODO confirm against callers.
        num_user = self.users.shape[0] + 1
        num_item = self.items.shape[0] + 1
        predict_layer_dim = np.array(predict_layer_dim)

        # ---- Autoencoders (user/item construction was duplicated; now shared)
        self.user_encode_model, self.user_decode_model, self.user_ae = \
            self._build_autoencoder('user', user_layer_dim, lamda_w,
                                    encoder_noise, dropout_rate, activation)
        self.item_encode_model, self.item_decode_model, self.item_ae = \
            self._build_autoencoder('item', item_layer_dim, lamda_w,
                                    encoder_noise, dropout_rate, activation)

        # ---- User embedding: encoded features + learned per-id offset ----
        user_input_layer = Input(shape=(1,), name='user_input')
        user_feature_input_layer = Input(shape=(user_feature_dim,), name="user_feature_input_layer")
        user_offset_vector = Embedding(input_dim=num_user, output_dim=user_embedding_dim, input_length=1, name='user_offset_vector', embeddings_regularizer=l2(lamda_u), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))(user_input_layer)
        user_offset_vector = Flatten(name='user_flatten')(user_offset_vector)
        user_encoded_layer = self.user_encode_model(user_feature_input_layer)
        user_decoded_layer = self.user_decode_model(user_encoded_layer)
        user_embedding_layer = Add()([user_encoded_layer, user_offset_vector])

        # item1 and item2 share one offset Embedding table.
        item_shared_embedding_layer = Embedding(input_dim=num_item, output_dim=item_embedding_dim, input_length=1, name='item_shared_offset_vector', embeddings_regularizer=l2(lamda_v), embeddings_initializer=RandomNormal(mean=0.0, stddev=0.05))

        # ---- Item1 embedding ----
        item1_input_layer = Input(shape=(1,), name='item1_input')
        item1_feature_input_layer = Input(shape=(item_feature_dim,), name="item1_feature_input_layer")
        item1_offset_vector = item_shared_embedding_layer(item1_input_layer)
        item1_offset_vector = Flatten(name='item1_flatten')(item1_offset_vector)
        item1_encoded_layer = self.item_encode_model(item1_feature_input_layer)
        item1_decoded_layer = self.item_decode_model(item1_encoded_layer)
        item1_embedding_layer = Add()([item1_encoded_layer, item1_offset_vector])

        # ---- Item2 embedding ----
        item2_input_layer = Input(shape=(1,), name='item2_input')
        item2_feature_input_layer = Input(shape=(item_feature_dim,), name="item2_feature_input_layer")
        item2_offset_vector = item_shared_embedding_layer(item2_input_layer)
        item2_offset_vector = Flatten(name='item2_flatten')(item2_offset_vector)
        item2_encoded_layer = self.item_encode_model(item2_feature_input_layer)
        item2_decoded_layer = self.item_decode_model(item2_encoded_layer)
        item2_embedding_layer = Add()([item2_encoded_layer, item2_offset_vector])

        # ---- Preference head: one MLP over [user ⊕ item1 ⊕ item2] ----
        predict_input_layer = Input(shape=(user_embedding_dim + 2 * item_embedding_dim,), name='predict_input_layer')
        hidden = predict_input_layer
        for i, layer_dim in enumerate(predict_layer_dim):
            hidden = Dense(layer_dim, kernel_initializer=RandomNormal(mean=0.0, stddev=0.1), kernel_regularizer=l2(lamda_w), bias_regularizer=l2(lamda_w))(hidden)
            hidden = Dropout(dropout_rate)(hidden)
            hidden = BatchNormalization()(hidden)
            if i != predict_layer_dim.size - 1:
                hidden = activation(hidden)
            else:
                # NOTE(review): with the default predict_layer_dim=[10, 1] this
                # softmax spans a single unit and always outputs 1.0, while the
                # training labels have two columns [y, 1-y]; the last layer
                # size likely needs to be 2 — confirm against callers.
                hidden = Activation('softmax')(hidden)
        predict_model = Model(inputs=predict_input_layer, outputs=hidden, name="predict_model")

        output_layer = predict_model(Concatenate()([user_embedding_layer, item1_embedding_layer, item2_embedding_layer]))

        # Joint model: preference head + three reconstruction heads.
        self.cdl_model = Model(
            inputs=[
                user_input_layer,
                item1_input_layer,
                item2_input_layer,
                user_feature_input_layer,
                item1_feature_input_layer,
                item2_feature_input_layer],
            outputs=[
                output_layer,
                user_decoded_layer,
                item1_decoded_layer,
                item2_decoded_layer],
            name="CDL")

    def _build_autoencoder(self, prefix, layer_dim, lamda_w, encoder_noise, dropout_rate, activation):
        """Build one stacked denoising autoencoder.

        prefix    = 'user' or 'item'; used to keep the original layer names.
        layer_dim = np.array of layer sizes, symmetric around the bottleneck.
        Returns (encoder Model, decoder Model, end-to-end autoencoder Model).
        """
        half = layer_dim.size // 2

        def dense_kwargs():
            # Fresh initializer/regularizer objects per layer, as the original
            # inline construction did.
            return dict(kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                        kernel_regularizer=l2(lamda_w),
                        bias_regularizer=l2(lamda_w))

        # Encoder: noisy input -> Dense stacks down to the bottleneck.
        encode_input = Input(shape=(layer_dim[0],), name="%s_encode_input_layer" % prefix)
        x = GaussianNoise(stddev=encoder_noise)(encode_input)
        encode_model = None
        for i in range(1, half + 1):
            x = Dense(layer_dim[i], **dense_kwargs())(x)
            if i == half:
                # Snapshot the raw (pre-dropout/BN/activation) bottleneck as
                # the embedding model; the ops below continue the AE path only.
                encode_model = Model(inputs=encode_input, outputs=x, name="%s_encoder" % prefix)
            x = Dropout(dropout_rate)(x)
            x = BatchNormalization()(x)
            x = activation(x)

        # Decoder: bottleneck -> Dense stacks back up; sigmoid on the output
        # so reconstructions match binary_crossentropy training.
        decode_input = Input(shape=(layer_dim[half],), name="%s_decode_input_layer" % prefix)
        y = GaussianNoise(stddev=encoder_noise)(decode_input)
        decode_model = None
        for i in range(half + 1, layer_dim.size):
            y = Dense(layer_dim[i], **dense_kwargs())(y)
            if i == layer_dim.size - 1:
                y = BatchNormalization()(y)
                y = Activation('sigmoid')(y)
                decode_model = Model(inputs=decode_input, outputs=y, name="%s_decoder" % prefix)
            else:
                y = Dropout(dropout_rate)(y)
                y = BatchNormalization()(y)
                y = activation(y)

        autoencoder = Model(inputs=encode_model.inputs,
                            outputs=decode_model(encode_model.outputs),
                            name="%s_autoencoder" % prefix)
        return encode_model, decode_model, autoencoder

    def pretrain_autoencoder(self,
                             user_optimizer="adam", user_batch_size=5, user_epochs=50,
                             item_optimizer="adam", item_batch_size=2, item_epochs=50,
                             callbacks=None,
                             verbose=1):
        """Train each autoencoder to reconstruct its own feature matrix.

        Uses binary cross-entropy, so features are presumably scaled to
        [0, 1] — TODO confirm against the data pipeline.
        """
        callbacks = [] if callbacks is None else callbacks  # no mutable default
        # Pretrain user autoencoder.
        self.user_ae.compile(loss='binary_crossentropy', optimizer=user_optimizer)
        self.user_ae.fit(x=self.users, y=self.users, batch_size=user_batch_size, epochs=user_epochs, callbacks=callbacks, verbose=verbose)
        # Pretrain item autoencoder.
        self.item_ae.compile(loss='binary_crossentropy', optimizer=item_optimizer)
        self.item_ae.fit(x=self.items, y=self.items, batch_size=item_batch_size, epochs=item_epochs, callbacks=callbacks, verbose=verbose)

    def fit(self,
            X_train,
            Y_train,
            X_test=None,
            Y_test=None,
            lamda_n=1,   # item reconstruction loss weight
            lamda_m=1,   # user reconstruction loss weight
            lamda_c=10,  # preference prediction loss weight
            optimizer="adam",
            callbacks=None,
            batch_size=64,
            epochs=10):
        """Jointly fine-tune the preference head and both autoencoders.

        X_* rows are (user_id, item1_id, item2_id) triples; Y_* is 1 when
        item1 is preferred, 0 otherwise.  Returns the Keras History.
        """
        callbacks = [] if callbacks is None else callbacks  # no mutable default
        self.cdl_model.compile(
            optimizer=optimizer,
            loss=['categorical_crossentropy', 'binary_crossentropy', 'binary_crossentropy', 'binary_crossentropy'],
            loss_weights=[lamda_c, lamda_m, lamda_n, lamda_n])

        train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label = self.matrix2input(X_train, Y_train)
        if X_test is not None:
            test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test)
            validation_data = (
                [test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature],
                [test_label, test_user_feature, test_item1_feature, test_item2_feature],
            )
        else:
            validation_data = None

        model_history = self.cdl_model.fit(
            x=[train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature],
            y=[train_label, train_user_feature, train_item1_feature, train_item2_feature],
            verbose=1,
            validation_data=validation_data,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks)
        return model_history

    def matrix2input(self, X_train, Y_train):
        """Split (user, item1, item2) triples into the model's input arrays.

        Returns the three id columns, the gathered feature rows for each id,
        and a two-column label [y, 1-y] (None when Y_train is None).
        """
        train_user = X_train[:, 0].reshape(-1, 1).astype(int)
        train_item1 = X_train[:, 1].reshape(-1, 1).astype(int)
        train_item2 = X_train[:, 2].reshape(-1, 1).astype(int)
        train_user_feature = self.users[train_user.flatten()]
        train_item1_feature = self.items[train_item1.flatten()]
        train_item2_feature = self.items[train_item2.flatten()]
        if Y_train is not None:
            train_label = Y_train.reshape(-1, 1)
            # Two-column target to match the softmax output.
            train_label = np.concatenate((train_label, 1 - train_label), axis=1)
        else:
            train_label = None
        return train_user, train_item1, train_item2, train_user_feature, train_item1_feature, train_item2_feature, train_label

    def get_autoencoder_loss(self):
        """Return the reconstruction log-loss of both autoencoders as a dict."""
        user_entropy = log_loss(self.users, self.user_ae.predict(self.users))
        item_entropy = log_loss(self.items, self.item_ae.predict(self.items))
        return {'user_entropy': user_entropy, 'item_entropy': item_entropy}

    def get_accuracy(self, X_test, Y_test):
        """Return preference accuracy on (X_test, Y_test)."""
        test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, Y_test)
        pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0])
        # Output column 0 is P(item1 preferred): argmax 0 -> label 1.
        return accuracy_score(test_label[:, 0], 1 - np.argmax(pred_out, axis=1))

    def predict(self, X_test):
        """Return 1 where item1 is predicted preferred over item2, else 0."""
        test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature, test_label = self.matrix2input(X_test, None)
        pred_out = np.array(self.cdl_model.predict([test_user, test_item1, test_item2, test_user_feature, test_item1_feature, test_item2_feature])[0])
        return 1 - np.argmax(pred_out, axis=1)

    def get_model(self):
        """Return the underlying joint Keras model."""
        return self.cdl_model
# Hand-tuned hyperparameters found by the random searches in main().
RANDOM_FOREST_PARAM = {'n_estimators': 42, 'max_depth': 4, 'criterion': 'gini', 'class_weight': None}

SVM_PARAM = {'C': 4.2436520852238155, 'gamma': 0.02430979318330913, 'class_weight': 'balanced'}

LOGISTIC_REG_PARAM = {'C': 0.04}


def expand(X, users, items):
    """Expand (user_id, item1_id, item2_id) triples into flat feature rows.

    X     = array-like of shape (n, 3) holding integer row indices.
    users = user feature matrix (# of user, user_dim).
    items = item feature matrix (# of item, item_dim).
    Returns a float64 array of shape (n, user_dim + 2 * item_dim) where each
    row is [user features | item1 features | item2 features].
    """
    X = np.asarray(X).astype(int)
    # Vectorized gather + concat replaces the original per-row Python loop;
    # astype keeps the float64 dtype the old np.zeros buffer guaranteed.
    return np.concatenate(
        [users[X[:, 0]], items[X[:, 1]], items[X[:, 2]]],
        axis=1).astype(np.float64)


def save_model(config):
    """Fit the three reference classifiers and persist each with joblib.

    config is forwarded to custom_load; models are written to
    ./models/<Name>-<test accuracy>.pkl and the accuracy printed.
    """
    X, Y, users, items, pred_X = custom_load(**config)

    train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.33, random_state=1)
    # Expand once instead of once per classifier.
    train_features = expand(train_X, users, items)
    test_features = expand(test_X, users, items)

    # (display name, estimator) pairs share one fit/score/dump pipeline.
    # NOTE(review): sklearn.externals.joblib is deprecated; prefer the
    # standalone joblib package when the environment allows.
    candidates = [
        ('Randomforest', RandomForestClassifier(**RANDOM_FOREST_PARAM)),
        ('SVM', SVC(**SVM_PARAM)),
        ('LogisticRegression', LogisticRegression(**LOGISTIC_REG_PARAM)),
    ]
    for name, model in candidates:
        model.fit(train_features, train_Y)
        acc = accuracy_score(test_Y, model.predict(test_features))
        print("%s: %f" % (name, acc))
        joblib.dump(model, './models/%s-%f.pkl' % (name, acc))
[dict(zip(params.keys(), value)) for value in zip(*params.values())]: + # rdf_clf = RandomForestClassifier(**param) + # rdf_clf.fit(train_X, train_Y) + # train_acc = accuracy_score(train_Y, rdf_clf.predict(train_X)) + # dev_acc = accuracy_score(dev_Y, rdf_clf.predict(dev_X)) + # overfit = (train_acc - dev_acc) + + # rdf_clf = RandomForestClassifier(**param) + # rdf_clf.fit(np.concatenate((train_X, dev_X), axis=0), np.concatenate((train_Y, dev_Y), axis=0)) + # test_acc = accuracy_score(test_Y, rdf_clf.predict(test_X)) + # if overfit < 0.02 and test_acc > 0.64: + # print(param) + # print('Train: %.5f, Test: %.5f, OverFit: %.5f, Test: %.5f' % (train_acc, dev_acc, overfit, test_acc)) + + # SVM + # iteration = 100 + # params = { + # 'C': scipy.stats.expon(scale=10).rvs(size=iteration), + # 'gamma': scipy.stats.expon(scale=0.1).rvs(size=iteration), + # 'class_weight': np.random.choice(['balanced', None], size=iteration) + # } + # candidate = [] + # for C, gamma, class_weight in zip(params['C'], params['gamma'], params['class_weight']): + # svm_clf = SVC(C=C, gamma=gamma, class_weight=class_weight) + # svm_clf.fit(train_X, train_Y) + # train_acc = accuracy_score(train_Y, svm_clf.predict(train_X)) + # dev_acc = accuracy_score(dev_Y, svm_clf.predict(dev_X)) + # overfit = (train_acc - dev_acc) / train_acc + # svm_clf = SVC(C=C, gamma=gamma, class_weight=class_weight) + # svm_clf.fit(np.concatenate((train_X, dev_X), axis=0), np.concatenate((train_Y, dev_Y), axis=0)) + # test_acc = accuracy_score(test_Y, svm_clf.predict(test_X)) + # if overfit < 0.03 and test_acc > 0.65: + # print({'C': C, 'gamma': gamma, 'class_weight': class_weight}) + # print('Train: %.5f, Test: %.5f, OverFit: %.5f, Test: %.5f' % (train_acc, dev_acc, overfit, test_acc)) + + # AdaBoost + SVM + # bdt_svm_clf = tuning(AdaBoostClassifier(), + # { + # 'base_estimator': [SVC()], + # 'n_estimators': np.arange(10, 201, 10), + # 'algorithm': ['SAMME'] + # }, + # train_X, dev_X, train_Y, dev_Y + # ) + # print('Train 
acc:', accuracy_score(train_Y, bdt_svm_clf.predict(train_X))) + # print('Test acc:', accuracy_score(test_Y, bdt_svm_clf.predict(test_X))) + # predict_svm_clf = AdaBoostClassifier(**bdt_svm_clf.best_params_) + # predict_svm_clf.fit(feature, label) + # save_result(pred_event, predict_svm_clf.predict(test_X), "./adaboost-svm-result.csv") + + # AdaBoost + DecisionTree + # for n_estimators in range(1, 40): + # dbt_dt_clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), algorithm='SAMME', n_estimators=n_estimators) # 7 + # dbt_dt_clf.fit(train_X, train_Y) + # train_acc = accuracy_score(train_Y, dbt_dt_clf.predict(train_X)) + # test_acc = accuracy_score(test_Y, dbt_dt_clf.predict(test_X)) + + # predict_svm_clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), algorithm='SAMME', n_estimators=n_estimators) + # predict_svm_clf.fit(feature, label) + # kfold_acc = accuracy_score(label, dbt_dt_clf.predict(feature)) + # overfit = (train_acc - test_acc) / train_acc + # print('N: %2d, Train: %.5f, Test: %.5f, OverFit: %.5f, 5-Fold: %.5f' % (n_estimators, train_acc, test_acc, overfit, kfold_acc)) + + # LogisticRegression + # log_clf = LogisticRegression(C=0.04) + # log_clf.fit(expand(X, users, items), Y) + # print('LogisticRegression Accuracy: %f' % accuracy_score(test_Y, log_clf.predict(expand(test_X, users, items)))) + + # Output + # final_clf = SVC(**{'C': 5.329572790120016, 'gamma': 0.0005022557449742615, 'class_weight': None}) + # final_clf.fit(feature, label) + # save_result(pred_event, final_clf.predict(test_X), "./svm(5.329-0.0005-None)-result.csv") + + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + parser = argparse.ArgumentParser(description='PTT Crawer') + parser.add_argument('-c', '--config', type=str, default="./config.json") + parser.add_argument('-s', '--save_model', action='store_true') + args = parser.parse_args() + with open(args.config, 'r') as config_file: + config = 
# src/main_cdl.py — random search over CDL hyper-parameters, scoring each
# candidate with k-fold cross-validation and logging the per-candidate stats.
import logging
import pickle
import json  # duplicate `import json` removed
import numpy as np
import cdl
import argparse
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from keras.utils import plot_model
from keras.layers import Activation, LeakyReLU
from keras import optimizers
from keras import callbacks
from utils import custom_load, save_result


def main(config):
    """Randomly sample `iteration` hyper-parameter candidates and evaluate
    each with k-fold CV, logging accuracies and autoencoder entropies."""
    X, Y, users, items, pred_X = custom_load(**config)

    num_user_feature = users.shape[1]  # 13
    num_item_feature = items.shape[1]  # 11

    iteration = 1000
    k_fold = 5

    params = {
        'build': {
            'lamda_w': np.random.choice(np.logspace(-5, 5, num=20, base=2.0), iteration),  # weight regularization
            'lamda_u': np.random.choice(np.logspace(-5, 5, num=20, base=2.0), iteration),  # user offset regularization
            'lamda_v': np.random.choice(np.logspace(-5, 5, num=20, base=2.0), iteration),  # item offset regularization
        },
        'train': {
            'lamda_n': np.random.choice(np.logspace(-5, 5, num=20, base=2.0), iteration),  # item decode loss
            'lamda_m': np.random.choice(np.logspace(-5, 5, num=20, base=2.0), iteration),  # user decode loss
            'lamda_c': np.random.choice(np.logspace(-5, 10, num=20, base=2.0), iteration),  # predict loss
            'batch_size': np.random.choice([32, 64, 100, 128, 150], iteration),
        }
    }

    # Transpose {key: [v1..vN]} into N per-candidate {key: v} dicts.
    params_build = [dict(zip(params['build'].keys(), value)) for value in zip(*params['build'].values())]
    params_train = [dict(zip(params['train'].keys(), value)) for value in zip(*params['train'].values())]

    for param_build, param_train in zip(params_build, params_train):

        testing_acc = []
        training_acc = []
        autoencoder_entropy = []
        autoencoder_pretrain_entropy = []
        for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)):
            X_train, X_test = X[train_index], X[test_index]
            Y_train, Y_test = Y[train_index], Y[test_index]

            model = cdl.CDL(
                users, items,
                user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
                item_layer_dim=[num_item_feature, 7, num_item_feature],
                **param_build,
                encoder_noise=0.2,
                dropout_rate=0.4,
                activation='selu')

            # Entropy before fine-tuning, for before/after comparison below.
            autoencoder_pretrain_entropy.append(model.get_autoencoder_loss())

            model_history = model.fit(
                X_train, Y_train,
                X_test, Y_test,
                **param_train,
                epochs=300,
                optimizer=optimizers.Adam(lr=0.001),
                callbacks=[callbacks.EarlyStopping(
                    monitor='val_loss',
                    min_delta=0,
                    patience=10,
                    verbose=1,
                    mode='auto')]
            )
            autoencoder_entropy.append(model.get_autoencoder_loss())

            # Optional artifacts, kept disabled during the search:
            # with open('./history/fold-%d.history' % i, 'wb') as fout:
            #     pickle.dump(model_history.history, fout)
            # plot_model(model.cdl_model, to_file='model.png')

            testing_acc.append(model.get_accuracy(X_test, Y_test))
            training_acc.append(model.get_accuracy(X_train, Y_train))

        logging.info("Build Param: %s" % param_build)
        logging.info("Train Param: %s" % param_train)

        logging.info("Initial Autoencoder Entropy")
        logging.info("\tUser: %f" % np.mean([v['user_entropy'] for v in autoencoder_pretrain_entropy]))
        logging.info("\tItem: %f" % np.mean([v['item_entropy'] for v in autoencoder_pretrain_entropy]))

        logging.info("Final Autoencoder Entropy:")
        logging.info("\tUser: %f" % np.mean([v['user_entropy'] for v in autoencoder_entropy]))
        logging.info("\tItem: %f" % np.mean([v['item_entropy'] for v in autoencoder_entropy]))

        logging.info("Training:")
        logging.info("\tK-fold Accuracy: %s" % training_acc)
        logging.info("\tAverage Accuracy: %f" % np.array(training_acc).mean())

        logging.info("Testing:")
        logging.info("\tK-fold Accuracy: %s" % testing_acc)
        logging.info("\tAverage Accuracy: %f" % np.array(testing_acc).mean())
        logging.info("---------------------------------------")


if __name__ == "__main__":
    logging.basicConfig(
        format="[%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler("./logs/tuning.log"),  # assumes ./logs exists — TODO confirm
            logging.StreamHandler()
        ],
        level=logging.INFO
    )

    parser = argparse.ArgumentParser(description='PTT Crawler')  # typo fixed: "Crawer"
    parser.add_argument('-c', '--config', type=str, default="./config.json")
    args = parser.parse_args()

    with open(args.config, 'r') as config_file:
        config = json.loads(config_file.read())
    main(config)
# src/main_cdl_ffn.py — 3-fold cross-validation and model export for the
# CDL-FFN variant.
import logging
import pickle
import numpy as np
import argparse
import json
import cdl_ffn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from keras.utils import plot_model
from keras.layers import Activation, LeakyReLU
from keras import optimizers
from keras import callbacks
from utils import custom_load, save_result


def main(config):
    """3-fold cross-validate CDL-FFN, logging per-fold accuracy and
    autoencoder entropy before/after fine-tuning."""
    logging.info('Reading data')
    X, Y, user_feature, item_feature, pred_X = custom_load(**config)

    num_user_feature = user_feature.shape[1]  # 13
    num_item_feature = item_feature.shape[1]  # 11

    testing_acc = []
    training_acc = []
    autoencoder_entropy = []
    autoencoder_pretrain_entropy = []

    for i, (train_index, test_index,) in enumerate(KFold(n_splits=3, shuffle=True).split(X)):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]

        # BUG FIX: the original referenced a bare `CDL`, which is never
        # imported in this module (NameError at runtime); the class lives
        # in cdl_ffn, matching save_model() below.
        model = cdl_ffn.CDL(
            user_feature, item_feature,
            user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
            item_layer_dim=[num_item_feature, 7, num_item_feature],
            predict_layer_dim=[10, 1],
            lamda_w=5,  # weight regularization
            lamda_u=5,  # user offset regularization
            lamda_v=5,  # item offset regularization
            encoder_noise=0.1,
            dropout_rate=0.2,
            activation=LeakyReLU(alpha=0))
        autoencoder_pretrain_entropy.append(model.get_autoencoder_loss())
        logging.info("Autoencoder Entropy: %s" % str(autoencoder_pretrain_entropy[-1]))

        model_history = model.fit(
            X_train, Y_train,
            X_test, Y_test,
            lamda_n=5,   # item decode loss
            lamda_m=5,   # user decode loss
            lamda_c=10,  # predict loss
            batch_size=32,
            epochs=300,
            optimizer=optimizers.Adam(lr=0.001),
            # EarlyStopping deliberately disabled for this variant:
            # callbacks=[callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
            #                                    patience=5, verbose=1, mode='auto')]
        )
        autoencoder_entropy.append(model.get_autoencoder_loss())

        with open('./history/fold-%d.history' % i, 'wb') as fout:
            pickle.dump(model_history.history, fout)

        # plot_model(model.cdl_model, to_file='model.png')

        testing_acc.append(model.get_accuracy(X_test, Y_test))
        training_acc.append(model.get_accuracy(X_train, Y_train))
        logging.info("Accuracy: %f" % testing_acc[-1])

    logging.info("Initial Autoencoder Entropy")
    logging.info("User Autoencoder Entropy: %f" % np.mean([v['user_entropy'] for v in autoencoder_pretrain_entropy]))
    logging.info("Item Autoencoder Entropy: %f" % np.mean([v['item_entropy'] for v in autoencoder_pretrain_entropy]))

    logging.info("Final Autoencoder Entropy")
    logging.info("User Autoencoder Entropy: %f" % np.mean([v['user_entropy'] for v in autoencoder_entropy]))
    logging.info("Item Autoencoder Entropy: %f" % np.mean([v['item_entropy'] for v in autoencoder_entropy]))

    logging.info("K-fold Training Accuracy: [%s]" % ", ".join([str(v) for v in training_acc]))
    logging.info("Average Training Accuracy: %f" % np.array(training_acc).mean())

    logging.info("K-fold Testing Accuracy: [%s]" % ", ".join([str(v) for v in testing_acc]))
    logging.info("Average Testing Accuracy: %f" % np.array(testing_acc).mean())


def save_model(config):
    """Train CDL-FFN on a fixed split and save the Keras model to ./models."""
    X, Y, users, items, pred_X = custom_load(**config)

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1)

    num_user_feature = users.shape[1]  # 13
    num_item_feature = items.shape[1]  # 11

    model = cdl_ffn.CDL(
        users, items,
        user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
        item_layer_dim=[num_item_feature, 7, num_item_feature],
        predict_layer_dim=[10, 1],
        lamda_w=5,  # weight regularization
        lamda_u=5,  # user offset regularization
        lamda_v=5,  # item offset regularization
        encoder_noise=0.1,
        dropout_rate=0.2,
        activation=Activation("relu"))

    logging.info("Autoencoder init Entropy: %s" % str(model.get_autoencoder_loss()))

    model.fit(
        X_train, Y_train,
        X_test, Y_test,
        lamda_n=5,   # item decode loss
        lamda_m=5,   # user decode loss
        lamda_c=10,  # predict loss
        batch_size=32,
        epochs=300,
        optimizer=optimizers.Adam(lr=0.001),
    )

    logging.info("Autoencoder final Entropy: %s" % str(model.get_autoencoder_loss()))
    logging.info("Accuracy: %f" % model.get_accuracy(X_test, Y_test))
    model.get_model().save('./models/CDL-FNN-%f.h5' % model.get_accuracy(X_test, Y_test))


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
    parser = argparse.ArgumentParser(description='PTT Crawler')  # typo fixed: "Crawer"
    parser.add_argument('-c', '--config', type=str, default="./config.json")
    parser.add_argument('-s', '--save_model', action='store_true')
    args = parser.parse_args()
    with open(args.config, 'r') as config_file:
        config = json.loads(config_file.read())
    if args.save_model:
        save_model(config)
    else:
        main(config)
# src/main_cdl_fn.py — 3-fold cross-validation and model export for the
# CDL-FN variant.
import logging
import pickle
import numpy as np
import argparse
import json
import cdl_fn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from keras.utils import plot_model
from keras.layers import Activation, LeakyReLU
from keras import optimizers
from keras import callbacks
from utils import custom_load, save_result


def main(config):
    """3-fold cross-validate CDL-FN, logging per-fold accuracy and
    autoencoder entropy before/after fine-tuning."""
    logging.info('Reading data')
    X, Y, user_feature, item_feature, pred_X = custom_load(**config)

    num_user_feature = user_feature.shape[1]  # 13
    num_item_feature = item_feature.shape[1]  # 11

    testing_acc = []
    training_acc = []
    autoencoder_entropy = []
    autoencoder_pretrain_entropy = []

    for i, (train_index, test_index,) in enumerate(KFold(n_splits=3, shuffle=True).split(X)):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]

        model = cdl_fn.CDL(
            user_feature, item_feature,
            user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
            item_layer_dim=[num_item_feature, 7, num_item_feature],
            predict_layer_dim=[14, 2],
            lamda_w=10,  # weight regularization
            lamda_u=1,   # user offset regularization
            lamda_v=1,   # item offset regularization
            encoder_noise=0.2,
            dropout_rate=0.2,
            activation=Activation('relu'))
        autoencoder_pretrain_entropy.append(model.get_autoencoder_loss())
        logging.info("Autoencoder Entropy: %s" % str(autoencoder_pretrain_entropy[-1]))

        model_history = model.fit(
            X_train, Y_train,
            X_test, Y_test,
            lamda_n=10,   # item decode loss
            lamda_m=10,   # user decode loss
            lamda_c=100,  # predict loss
            batch_size=64,
            epochs=300,
            optimizer=optimizers.Adam(lr=0.0005),
            # EarlyStopping deliberately disabled for this variant:
            # callbacks=[callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
            #                                    patience=10, verbose=1, mode='auto')]
        )
        autoencoder_entropy.append(model.get_autoencoder_loss())

        with open('./history/fold-%d.history' % i, 'wb') as fout:
            pickle.dump(model_history.history, fout)

        # plot_model(model.cdl_model, to_file='model.png')

        testing_acc.append(model.get_accuracy(X_test, Y_test))
        training_acc.append(model.get_accuracy(X_train, Y_train))
        logging.info("Accuracy: %f" % testing_acc[-1])

    logging.info("Initial Autoencoder Entropy")
    logging.info("User Autoencoder Entropy: %f" % np.mean([v['user_entropy'] for v in autoencoder_pretrain_entropy]))
    logging.info("Item Autoencoder Entropy: %f" % np.mean([v['item_entropy'] for v in autoencoder_pretrain_entropy]))

    logging.info("Final Autoencoder Entropy")
    logging.info("User Autoencoder Entropy: %f" % np.mean([v['user_entropy'] for v in autoencoder_entropy]))
    logging.info("Item Autoencoder Entropy: %f" % np.mean([v['item_entropy'] for v in autoencoder_entropy]))

    logging.info("K-fold Training Accuracy: [%s]" % ", ".join([str(v) for v in training_acc]))
    logging.info("Average Training Accuracy: %f" % np.array(training_acc).mean())

    logging.info("K-fold Testing Accuracy: [%s]" % ", ".join([str(v) for v in testing_acc]))
    logging.info("Average Testing Accuracy: %f" % np.array(testing_acc).mean())


def save_model(config):
    """Train CDL-FN on a fixed split and save the Keras model to ./models.

    NOTE(review): the hyper-parameters here differ from main() above
    (predict_layer_dim, lamdas, batch size) — they look copied from the
    cdl_ffn script; confirm they are intended for this variant.
    """
    X, Y, users, items, pred_X = custom_load(**config)

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1)

    num_user_feature = users.shape[1]  # 13
    num_item_feature = items.shape[1]  # 11

    # BUG FIX: the original called cdl_ffn.CDL(...), but this module imports
    # cdl_fn, not cdl_ffn — a guaranteed NameError when run with -s.
    model = cdl_fn.CDL(
        users, items,
        user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
        item_layer_dim=[num_item_feature, 7, num_item_feature],
        predict_layer_dim=[10, 1],
        lamda_w=5,  # weight regularization
        lamda_u=5,  # user offset regularization
        lamda_v=5,  # item offset regularization
        encoder_noise=0.1,
        dropout_rate=0.2,
        activation=Activation("relu"))

    logging.info("Autoencoder init Entropy: %s" % str(model.get_autoencoder_loss()))

    model.fit(
        X_train, Y_train,
        X_test, Y_test,
        lamda_n=5,   # item decode loss
        lamda_m=5,   # user decode loss
        lamda_c=10,  # predict loss
        batch_size=32,
        epochs=300,
        optimizer=optimizers.Adam(lr=0.001),
    )

    logging.info("Autoencoder final Entropy: %s" % str(model.get_autoencoder_loss()))
    logging.info("Accuracy: %f" % model.get_accuracy(X_test, Y_test))
    model.get_model().save('./models/CDL-FNN-%f.h5' % model.get_accuracy(X_test, Y_test))


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
    parser = argparse.ArgumentParser(description='PTT Crawler')  # typo fixed: "Crawer"
    parser.add_argument('-c', '--config', type=str, default="./config.json")
    parser.add_argument('-s', '--save_model', action='store_true')
    args = parser.parse_args()
    with open(args.config, 'r') as config_file:
        config = json.loads(config_file.read())
    if args.save_model:
        save_model(config)
    else:
        main(config)
dropout_rate=0.4, + activation='selu') + + model.fit( + X, Y, + lamda_n=1.2001027195781027, + lamda_m=2.489385178928047, + lamda_c=47.80028677294353, + batch_size=150, + epochs=110, + optimizer=optimizers.Adam(lr=0.001), + # callbacks=[callbacks.EarlyStopping( + # monitor='val_loss', + # min_delta=0, + # patience=10, + # verbose=0, + # mode='auto')] + ) + + pred_Y = model.predict(pred_X) + save_result(pred_X + 1, pred_Y, file_name="out1.csv") + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s') + with open('./config.json', 'r') as config_file: + config = json.loads(config_file.read()) + main(config) \ No newline at end of file diff --git a/src/tuning_cdl.py b/src/tuning_cdl.py new file mode 100644 index 0000000..5bb129a --- /dev/null +++ b/src/tuning_cdl.py @@ -0,0 +1,115 @@ +import logging +import pickle +import json +import numpy as np +import cdl +import json +import argparse +from sklearn.model_selection import train_test_split +from sklearn.model_selection import KFold +from keras.utils import plot_model +from keras.layers import Activation, LeakyReLU +from keras import optimizers +from keras import callbacks +from utils import custom_load, save_result + +def main(config): + X, Y, users, items, pred_X = custom_load(**config) + + num_user_feature = users.shape[1] # 13 + num_item_feature = items.shape[1] # 11 + + k_fold = 10 + + testing_acc = [] + training_acc = [] + autoencoder_entropy = [] + autoencoder_pretrain_entropy = [] + + models = [] + for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)): + X_train, X_test = X[train_index], X[test_index] + Y_train, Y_test = Y[train_index], Y[test_index] + + model = cdl.CDL( + users, items, + user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature], + item_layer_dim=[num_item_feature, 7, num_item_feature], + lamda_u=32, + lamda_w=0.1344613232775228, + lamda_v=0.27891447944038594, + encoder_noise=0.2, + 
# src/tuning_cdl.py — 10-fold cross-validation with the tuned CDL
# hyper-parameters; the best fold's model produces the submission file.
import logging
import pickle
import json  # duplicate `import json` removed
import numpy as np
import cdl
import argparse
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from keras.utils import plot_model
from keras.layers import Activation, LeakyReLU
from keras import optimizers
from keras import callbacks
from utils import custom_load, save_result


def main(config):
    """Run 10-fold CV, log accuracy/entropy stats, then predict with the
    model from the best-scoring fold and write out2.csv."""
    X, Y, users, items, pred_X = custom_load(**config)

    num_user_feature = users.shape[1]  # 13
    num_item_feature = items.shape[1]  # 11

    k_fold = 10

    testing_acc = []
    training_acc = []
    autoencoder_entropy = []
    autoencoder_pretrain_entropy = []

    models = []
    for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]

        model = cdl.CDL(
            users, items,
            user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],
            item_layer_dim=[num_item_feature, 7, num_item_feature],
            lamda_u=32,
            lamda_w=0.1344613232775228,
            lamda_v=0.27891447944038594,
            encoder_noise=0.2,
            dropout_rate=0.4,
            activation='selu')

        autoencoder_pretrain_entropy.append(model.get_autoencoder_loss())

        model_history = model.fit(
            X_train, Y_train,
            X_test, Y_test,
            lamda_n=1.2001027195781027,
            lamda_m=2.489385178928047,
            lamda_c=47.80028677294353,
            batch_size=150,
            epochs=300,
            optimizer=optimizers.Adam(lr=0.001),
            callbacks=[callbacks.EarlyStopping(
                monitor='val_loss',
                min_delta=0,
                patience=10,
                verbose=0,
                mode='auto')]
        )
        autoencoder_entropy.append(model.get_autoencoder_loss())

        # Save the per-fold training history and an architecture diagram.
        with open('./history/fold-%d.history' % i, 'wb') as fout:
            pickle.dump(model_history.history, fout)

        plot_model(model.cdl_model, to_file='model.png')

        testing_acc.append(model.get_accuracy(X_test, Y_test))
        training_acc.append(model.get_accuracy(X_train, Y_train))
        models.append(model)
        logging.info("Accuracy: %f" % testing_acc[-1])

    logging.info("Initial Autoencoder Entropy")
    logging.info("\tUser: %f" % np.mean([v['user_entropy'] for v in autoencoder_pretrain_entropy]))
    logging.info("\tItem: %f" % np.mean([v['item_entropy'] for v in autoencoder_pretrain_entropy]))

    logging.info("Final Autoencoder Entropy:")
    logging.info("\tUser: %f" % np.mean([v['user_entropy'] for v in autoencoder_entropy]))
    logging.info("\tItem: %f" % np.mean([v['item_entropy'] for v in autoencoder_entropy]))

    logging.info("Training:")
    logging.info("\tK-fold Accuracy: %s" % training_acc)
    logging.info("\tAverage Accuracy: %f" % np.array(training_acc).mean())

    logging.info("Testing:")
    logging.info("\tK-fold Accuracy: %s" % testing_acc)
    logging.info("\tAverage Accuracy: %f" % np.array(testing_acc).mean())

    best_model = models[np.argmax(testing_acc)]
    pred_Y = best_model.predict(pred_X)
    save_result(pred_X + 1, pred_Y, file_name="out2.csv")  # ids back to 1-based


if __name__ == "__main__":
    logging.basicConfig(
        format="[%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler("./logs/tuning.log"),  # assumes ./logs exists — TODO confirm
            logging.StreamHandler()
        ],
        level=logging.INFO
    )

    parser = argparse.ArgumentParser(description='PTT Crawler')  # typo fixed: "Crawer"
    parser.add_argument('-c', '--config', type=str, default="./config.json")
    args = parser.parse_args()

    with open(args.config, 'r') as config_file:
        config = json.loads(config_file.read())
    main(config)


# --- src/utils.py (shares physical lines with the script above in this
# --- flattened patch; both units are reconstructed here) -----------------
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
import pandas as pd


def save_result(pred_X, pred_Y, file_name="../out.csv"):
    """Write predictions as 'User-Item1-Item2,Preference' CSV rows.

    pred_X: (n, 3) array of [user, item1, item2] ids (expected 1-based —
    callers pass `pred_X + 1`); pred_Y: n predictions, written with %d.
    """
    with open(file_name, 'w') as fout:
        fout.write('User-Item1-Item2,Preference\n')
        for i in range(pred_X.shape[0]):
            fout.write('%d-%d-%d,%d\n' % (pred_X[i][0], pred_X[i][1], pred_X[i][2], pred_Y[i]))


def custom_load(users_path, items_path, train_path, predict_path, onehot=True, normalization=False):
    """Load user/item/event CSVs and return (X, Y, users, items, pred_X).

    Ids in the CSVs are 1-based and are shifted to 0-based indices here.
    """
    users_csv = pd.read_csv(users_path, index_col=0)
    items_csv = pd.read_csv(items_path, index_col=0)
    train_csv = pd.read_csv(train_path)
    pred_event_csv = pd.read_csv(predict_path)

    users = np.asarray(users_csv)
    items = np.asarray(items_csv)
    train_data = np.asarray(train_csv)
    pred_X = np.asarray(pred_event_csv) - 1
    X, Y = train_data[:, :3] - 1, train_data[:, 3:].ravel()

    # Fill missing values (encoded as 0) with each column's mode.
    # NOTE(review): np.bincount requires non-negative integer data — confirm
    # the user feature columns are integral.
    for col in range(users.shape[1]):
        mode = np.argmax(np.bincount(users[:, col]))
        users[users[:, col] == 0, col] = mode

    items[:, [2, 3]] = items[:, [3, 2]]  # swap columns 2 and 3
    if onehot:
        # Bucketize the continuous last item column before one-hot encoding.
        mapping = {2.5: 1, 3.5: 2, 4.5: 3, 5.5: 4, 6.2: 5}
        for i in range(items.shape[0]):
            items[i, -1] = mapping[items[i, -1]]
        users = OneHotEncoder().fit_transform(users).toarray()
        items = OneHotEncoder().fit_transform(items).toarray()

    if normalization:
        # BUG FIX: StandardScaler.fit_transform already returns a dense
        # ndarray, so the original chained `.toarray()` raised
        # AttributeError whenever normalization=True.
        users = StandardScaler().fit_transform(users)
        items = StandardScaler().fit_transform(items)

    return X, Y, users, items, pred_X

# (Legacy top-level utils.py — read_rating / read_feature — was deleted in
# this patch; it is superseded by custom_load above.)