Commit
first attempt compression
LorenzoValente3 committed Dec 6, 2021
1 parent 5379a22 commit bb7e100
Showing 102 changed files with 564 additions and 25,340 deletions.
194 changes: 104 additions & 90 deletions AE.ipynb

Large diffs are not rendered by default.

138 changes: 49 additions & 89 deletions classifier_MNIST_NNonFPGA.ipynb
@@ -9,7 +9,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -126,7 +126,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 2,
"metadata": {
"scrolled": true
},
@@ -137,7 +137,7 @@
"dtype('uint8')"
]
},
"execution_count": 10,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
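The output above simply confirms that the raw MNIST pixels arrive as dtype('uint8'). Later cells rely on arrays named X_train_flat_zoom, X_test_flat_zoom and X_train_flat_zoom_int with 64 features each, so here is a hedged sketch of the kind of preprocessing those names suggest; the zoom factor, the scipy.ndimage.zoom call and the normalisation are assumptions, not code taken from the notebook.

# Assumed preprocessing sketch: shrink each 28x28 digit to 8x8 and flatten it
# to 64 features, keeping an integer copy alongside a [0, 1] float copy.
from scipy.ndimage import zoom
from tensorflow.keras.datasets import mnist

(X_train, _), (X_test, _) = mnist.load_data()        # uint8, shape (N, 28, 28)

def flat_zoom(images, side=8):
    small = zoom(images, (1, side / 28, side / 28))   # per-axis zoom factors
    return small.reshape(len(small), side * side)     # (N, 64)

X_train_flat_zoom_int = flat_zoom(X_train)            # integer-valued, 64 features
X_train_flat_zoom = X_train_flat_zoom_int / 255.0     # float copy in [0, 1]
X_test_flat_zoom = flat_zoom(X_test) / 255.0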
@@ -155,7 +155,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -252,7 +252,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 4,
"metadata": {},
"outputs": [
{
@@ -263,7 +263,7 @@
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"input_2 (InputLayer) [(None, 64)] 0 \n",
"input_1 (InputLayer) [(None, 64)] 0 \n",
"_________________________________________________________________\n",
"dense (Dense) (None, 32) 2080 \n",
"_________________________________________________________________\n",
@@ -282,7 +282,7 @@
],
"source": [
"#ENCODER\n",
"input = Input(shape=(X_train_flat_zoom.shape[-1],))\n",
"input = Input(shape=(X_train_flat_zoom_int.shape[-1],))\n",
"encoder = Dense(32, activation='relu')(input)\n",
"encoder = Dense(2, activation='relu')(encoder)\n",
"\n",
@@ -298,100 +298,60 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0726 - val_loss: 0.0720\n",
"Epoch 2/40\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0719 - val_loss: 0.0714\n",
"Epoch 3/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0713 - val_loss: 0.0710\n",
"Epoch 4/40\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0709 - val_loss: 0.0706\n",
"Epoch 5/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0705 - val_loss: 0.0702\n",
"Epoch 6/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0702 - val_loss: 0.0699\n",
"Epoch 7/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0699 - val_loss: 0.0696\n",
"Epoch 8/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0696 - val_loss: 0.0693\n",
"Epoch 9/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0693 - val_loss: 0.0691\n",
"Epoch 10/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0691 - val_loss: 0.0689\n",
"Epoch 11/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0688 - val_loss: 0.0687\n",
"Epoch 12/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0686 - val_loss: 0.0685\n",
"Epoch 13/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0684 - val_loss: 0.0683\n",
"Epoch 14/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0683 - val_loss: 0.0681\n",
"Epoch 15/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0681 - val_loss: 0.0680\n",
"Epoch 16/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0679 - val_loss: 0.0679\n",
"Epoch 17/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0678 - val_loss: 0.0677\n",
"Epoch 18/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0677 - val_loss: 0.0676\n",
"Epoch 19/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0675 - val_loss: 0.0676\n",
"Epoch 20/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0674 - val_loss: 0.0674\n",
"Epoch 21/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0673 - val_loss: 0.0673\n",
"Epoch 22/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0672 - val_loss: 0.0673\n",
"Epoch 23/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0671 - val_loss: 0.0672\n",
"Epoch 24/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0670 - val_loss: 0.0671\n",
"Epoch 25/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0669 - val_loss: 0.0670\n",
"Epoch 26/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0668 - val_loss: 0.0669\n",
"Epoch 27/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0668 - val_loss: 0.0668\n",
"Epoch 28/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0667 - val_loss: 0.0668\n",
"Epoch 29/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0666 - val_loss: 0.0667\n",
"Epoch 30/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0665 - val_loss: 0.0666\n",
"Epoch 31/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0664 - val_loss: 0.0665\n",
"Epoch 32/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0663 - val_loss: 0.0665\n",
"Epoch 33/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0662 - val_loss: 0.0664\n",
"Epoch 34/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0661 - val_loss: 0.0664\n",
"Epoch 35/40\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0661 - val_loss: 0.0663\n",
"Epoch 36/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0660 - val_loss: 0.0662\n",
"Epoch 37/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0659 - val_loss: 0.0661\n",
"Epoch 38/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0658 - val_loss: 0.0661\n",
"Epoch 39/40\n",
"235/235 [==============================] - 0s 2ms/step - loss: 0.0658 - val_loss: 0.0660\n",
"Epoch 40/40\n",
"235/235 [==============================] - 1s 2ms/step - loss: 0.0657 - val_loss: 0.0660\n"
"Epoch 1/20\n",
"235/235 [==============================] - 1s 5ms/step - loss: 0.1300 - val_loss: 0.1014\n",
"Epoch 2/20\n",
"235/235 [==============================] - 1s 4ms/step - loss: 0.0979 - val_loss: 0.0946\n",
"Epoch 3/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0930 - val_loss: 0.0911\n",
"Epoch 4/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0903 - val_loss: 0.0891\n",
"Epoch 5/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0885 - val_loss: 0.0876\n",
"Epoch 6/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0871 - val_loss: 0.0862\n",
"Epoch 7/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0856 - val_loss: 0.0847\n",
"Epoch 8/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0842 - val_loss: 0.0832\n",
"Epoch 9/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0829 - val_loss: 0.0820\n",
"Epoch 10/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0817 - val_loss: 0.0810\n",
"Epoch 11/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0807 - val_loss: 0.0802\n",
"Epoch 12/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0800 - val_loss: 0.0797\n",
"Epoch 13/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0794 - val_loss: 0.0792\n",
"Epoch 14/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0790 - val_loss: 0.0788\n",
"Epoch 15/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0786 - val_loss: 0.0784\n",
"Epoch 16/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0782 - val_loss: 0.0780\n",
"Epoch 17/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0779 - val_loss: 0.0777\n",
"Epoch 18/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0776 - val_loss: 0.0774\n",
"Epoch 19/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0772 - val_loss: 0.0770\n",
"Epoch 20/20\n",
"235/235 [==============================] - 1s 3ms/step - loss: 0.0768 - val_loss: 0.0766\n"
]
}
],
"source": [
"history = autoencoder.fit(X_train_flat_zoom, X_train_flat_zoom,\n",
" validation_data=(X_test_flat_zoom, X_test_flat_zoom),\n",
" batch_size = 256, epochs = 40,\n",
" batch_size = 256, epochs = 20,\n",
" shuffle = True\n",
" )"
]
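The fit call above now trains for 20 epochs with batch size 256, validating on the held-out test images; the new log shows train and validation loss both settling around 0.077. A small follow-up sketch, not part of the commit, for inspecting the returned History object; the use of matplotlib here is an assumption about the plotting library.

# Assumed follow-up (not in the diff): plot the curves stored by fit() on the
# notebook's `history` object to check whether 20 epochs are enough.
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('reconstruction loss')
plt.legend()
plt.show()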