Add jupyter notebooks from wut

main
jebba 2022-01-24 16:07:24 -07:00
parent 999f96179b
commit 8ca321ff6d
4 changed files with 2369 additions and 0 deletions


@@ -0,0 +1,774 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# wut --- What U Think? SatNOGS Observation AI.\n",
"#\n",
"# https://spacecruft.org/spacecruft/satnogs-wut"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GPLv3+"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Built using Jupyter, Tensorflow, Keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Other data to consider adding:\n",
"# * JSON metadata\n",
"# * TLE\n",
"# * Audio File (ogg)\n",
"# https://www.tensorflow.org/io/api_docs/python/tfio/ffmpeg/AudioDataset\n",
"# * Decoded Data (HEX, ASCII, PNG)\n",
"# Data from external sources to consider adding:\n",
"# * Weather"
]
},
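{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch only: one way the per-observation JSON metadata listed above could be\n",
"# pulled in alongside the waterfall images. The file name below is a\n",
"# hypothetical example, not part of the current data layout.\n",
"#import json\n",
"#with open('data/train/good/observation-example.json') as f:\n",
"#    meta = json.load(f)\n",
"#print(sorted(meta.keys()))"
]
},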
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Start\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow.python.keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.python.keras import Sequential\n",
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
"from tensorflow.python.keras import optimizers\n",
"from tensorflow.python.keras.preprocessing import image\n",
"from tensorflow.python.keras.models import load_model\n",
"from tensorflow.python.keras.preprocessing.image import load_img\n",
"from tensorflow.python.keras.preprocessing.image import img_to_array"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.python.keras.models import Model\n",
"from tensorflow.python.keras.layers import Input, concatenate"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualization\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from sklearn.decomposition import PCA"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Seaborn pip dependency\n",
"import seaborn as sns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Interact\n",
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
"from __future__ import print_function\n",
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
"import ipywidgets as widgets"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Display Images\n",
"from IPython.display import display, Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Python import done\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dir = os.path.join('data/', 'train')\n",
"val_dir = os.path.join('data/', 'val')\n",
"test_dir = os.path.join('data/', 'test')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_good_dir = os.path.join(train_dir, 'good')\n",
"train_bad_dir = os.path.join(train_dir, 'bad')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_good_dir = os.path.join(val_dir, 'good')\n",
"val_bad_dir = os.path.join(val_dir, 'bad')"
]
},
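{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Directory layout assumed by the paths above (a sketch inferred from the\n",
"# os.path.join() calls, not an authoritative description):\n",
"#\n",
"# data/\n",
"#   train/\n",
"#     good/   <- waterfall images vetted good\n",
"#     bad/    <- waterfall images vetted bad\n",
"#   val/\n",
"#     good/\n",
"#     bad/\n",
"#   test/\n",
"#     (one subdirectory per class, as flow_from_directory expects)"
]
},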
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_train_good = len(os.listdir(train_good_dir))\n",
"num_train_bad = len(os.listdir(train_bad_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_val_good = len(os.listdir(val_good_dir))\n",
"num_val_bad = len(os.listdir(val_bad_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_test = len(os.listdir(test_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"total_train = num_train_good + num_train_bad\n",
"total_val = num_val_good + num_val_bad"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('total training good images:', num_train_good)\n",
"print('total training bad images:', num_train_bad)\n",
"print(\"--\")\n",
"print(\"Total training images:\", total_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('total validation good images:', num_val_good)\n",
"print('total validation bad images:', num_val_bad)\n",
"print(\"--\")\n",
"print(\"Total validation images:\", total_val)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Reduce training and validation set when testing\")\n",
"#total_train = 500\n",
"#total_val = 500\n",
"print(\"Train =\")\n",
"print(total_train)\n",
"print(\"Validation =\")\n",
"print(total_val)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Good results\n",
"#batch_size = 128\n",
"#epochs = 6\n",
"#\n",
"# Testing, faster more inaccurate results\n",
"batch_size = 96\n",
"epochs = 3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Half size\n",
"IMG_HEIGHT = 416\n",
"IMG_WIDTH= 804\n",
"# Full size, machine barfs probably needs more RAM\n",
"#IMG_HEIGHT = 832\n",
"#IMG_WIDTH = 1606"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=train_dir,\n",
" shuffle=True,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=val_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(test_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=test_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_train_images, _ = next(train_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_val_images, _ = next(val_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_test_images, _ = next(test_data_gen)"
]
},
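{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sanity check: the generators above rescale 8-bit pixel values by 1/255,\n",
"# so a sample batch should have shape (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n",
"# and values in [0, 1].\n",
"print(sample_train_images.shape)\n",
"print(sample_train_images.min(), sample_train_images.max())"
]
},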
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
"def plotImages(images_arr):\n",
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
" axes = axes.flatten()\n",
" for img, ax in zip( images_arr, axes):\n",
" ax.imshow(img)\n",
" ax.axis('off')\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_train_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_val_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_test_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = Sequential([\n",
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
" MaxPooling2D(),\n",
" Conv2D(32, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Conv2D(64, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Flatten(),\n",
" Dense(512, activation='relu'),\n",
" Dense(1, activation='sigmoid')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.compile(optimizer='adam',\n",
" loss='binary_crossentropy',\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.summary()"
]
},
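{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Rough size check (added sketch): the 'same'-padded convolutions keep the\n",
"# image size and each MaxPooling2D halves it, so almost all of the parameters\n",
"# end up in the first Dense layer.\n",
"h, w = IMG_HEIGHT, IMG_WIDTH\n",
"for _ in range(3):\n",
"    h, w = h // 2, w // 2\n",
"flat = h * w * 64\n",
"print('feature map after pooling:', h, 'x', w, 'x 64')\n",
"print('Flatten size:', flat)\n",
"print('Dense(512) parameters:', flat * 512 + 512)\n",
"# At the full 832x1606 size this is roughly four times larger, which is why\n",
"# the half-size setting above is used on machines with less RAM."
]
},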
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image.LOAD_TRUNCATED_IMAGES = True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"history = model.fit_generator(\n",
" train_data_gen,\n",
" steps_per_epoch=total_train // batch_size,\n",
" epochs=epochs,\n",
" validation_data=val_data_gen,\n",
" validation_steps=total_val // batch_size,\n",
" verbose=1\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"acc = history.history['accuracy']\n",
"val_acc = history.history['val_accuracy']\n",
"\n",
"loss = history.history['loss']\n",
"val_loss = history.history['val_loss']\n",
"\n",
"epochs_range = range(epochs)\n",
"\n",
"plt.figure(figsize=(8, 8))\n",
"plt.subplot(1, 2, 1)\n",
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
"plt.legend(loc='lower right')\n",
"plt.title('Training and Validation Accuracy')\n",
"\n",
"plt.subplot(1, 2, 2)\n",
"plt.plot(epochs_range, loss, label='Training Loss')\n",
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
"plt.legend(loc='upper right')\n",
"plt.title('Training and Validation Loss')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"TRAINING info\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_good_dir)\n",
"print(train_bad_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_image_generator)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#print(sample_train_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(history)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save('data/wut-CW.h5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# https://keras.io/models/sequential/\n",
"print(\"predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pred=model.predict_generator(test_data_gen,\n",
"steps=1,\n",
"verbose=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction = model.predict(\n",
" x=test_data_gen,\n",
" verbose=2\n",
")\n",
"print(\"end predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions=[]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show prediction score\n",
"print(prediction)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction_bool = (prediction >0.8)\n",
"print(prediction_bool)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions = prediction_bool.astype(int)\n",
"print(predictions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Make final prediction\n",
"# XXX, display name, display all of them with mini waterfall, etc.\n",
"if prediction_bool[0] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if prediction_bool[1] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if prediction_bool[2] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
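{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Equivalent loop over all predictions (a sketch of the per-index cells above,\n",
"# rather than one hard-coded cell per observation).\n",
"for i, good in enumerate(prediction_bool[:, 0]):\n",
"    rating = 'good' if good else 'bad'\n",
"    print('Observation %d: %s' % (i, rating))"
]
},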
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The End"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}


@@ -0,0 +1,401 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# wut-predict --- What U Think? SatNOGS Observation AI, makes predictions.\n",
"#\n",
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
"# Reads wut.h5 and tests files in data/test/unvetted/"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GPLv3+"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Built using Jupyter, Tensorflow, Keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Start\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow.python.keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.python.keras import Sequential\n",
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
"from tensorflow.python.keras import optimizers\n",
"from tensorflow.python.keras.preprocessing import image\n",
"from tensorflow.python.keras.models import load_model\n",
"from tensorflow.python.keras.preprocessing.image import load_img\n",
"from tensorflow.python.keras.preprocessing.image import img_to_array"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualization\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from sklearn.decomposition import PCA"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Seaborn pip dependency\n",
"import seaborn as sns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Interact\n",
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
"from __future__ import print_function\n",
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
"import ipywidgets as widgets"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Display Images\n",
"from IPython.display import display, Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Python import done\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Load HDF file\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('data/models/wut-DUV.tf')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_dir = os.path.join('data/', 'test')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_test = len(os.listdir(test_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Good results\n",
"#batch_size = 128\n",
"#epochs = 6\n",
"# Testing, faster more inaccurate results\n",
"batch_size = 32\n",
"epochs = 3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Half size\n",
"IMG_HEIGHT = 416\n",
"IMG_WIDTH= 804\n",
"# Full size, machine barfs probably needs more RAM\n",
"#IMG_HEIGHT = 832\n",
"#IMG_WIDTH = 1606"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(test_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=test_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_test_images, _ = next(test_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
"def plotImages(images_arr):\n",
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
" axes = axes.flatten()\n",
" for img, ax in zip( images_arr, axes):\n",
" ax.imshow(img)\n",
" ax.axis('off')\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_test_images[0:1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# https://keras.io/models/sequential/\n",
"print(\"predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#pred=model.predict_generator(test_data_gen,\n",
"#steps=1,\n",
"#verbose=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction = model.predict(\n",
" x=test_data_gen,\n",
" verbose=1\n",
")\n",
"print(\"end predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions=[]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show prediction score\n",
"print(prediction)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction_bool = (prediction >0.8)\n",
"print(prediction_bool)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions = prediction_bool.astype(int)\n",
"print(predictions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Make final prediction\n",
"# XXX, display name, display all of them with mini waterfall, etc.\n",
"if prediction_bool[0] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
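{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch for the XXX note above: pair each prediction with its file name via\n",
"# the generator's filenames attribute. The pairing is only reliable when the\n",
"# test generator is created with shuffle=False (flow_from_directory shuffles\n",
"# by default), so this is left commented out.\n",
"#for fname, score in zip(test_data_gen.filenames, prediction[:, 0]):\n",
"#    print(fname, 'good' if score > 0.8 else 'bad', score)"
]
},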
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The End"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}


@@ -0,0 +1,420 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# wut-train --- What U Think? SatNOGS Observation AI, training application.\n",
"#\n",
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
"#\n",
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
"#\n",
"# GPLv3+"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from __future__ import print_function\n",
"import os\n",
"import datetime\n",
"import numpy as np\n",
"import tensorflow.python.keras\n",
"from tensorflow.python.keras import Sequential\n",
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
"from tensorflow.python.keras import optimizers\n",
"from tensorflow.python.keras.preprocessing import image\n",
"from tensorflow.python.keras.models import load_model\n",
"from tensorflow.python.keras.preprocessing.image import load_img\n",
"from tensorflow.python.keras.preprocessing.image import img_to_array\n",
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
"from tensorflow.python.keras.models import Model\n",
"from tensorflow.python.keras.layers import Input, concatenate\n",
"# Visualization\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from sklearn.decomposition import PCA\n",
"# Seaborn pip dependency\n",
"import seaborn as sns\n",
"# Interact\n",
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
"import ipywidgets as widgets\n",
"# Display Images\n",
"from IPython.display import display, Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ENCODING='GMSK'\n",
"batch_size = 128\n",
"epochs = 4\n",
"IMG_WIDTH = 416\n",
"IMG_HEIGHT = 803"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING )\n",
"train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'train')\n",
"val_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'val')\n",
"train_good_dir = os.path.join(train_dir, 'good')\n",
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
"val_good_dir = os.path.join(val_dir, 'good')\n",
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
"num_train_good = len(os.listdir(train_good_dir))\n",
"num_train_bad = len(os.listdir(train_bad_dir))\n",
"num_val_good = len(os.listdir(val_good_dir))\n",
"num_val_bad = len(os.listdir(val_bad_dir))\n",
"total_train = num_train_good + num_train_bad\n",
"total_val = num_val_good + num_val_bad"
]
},
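{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Per-encoding layout assumed by the paths above (a sketch inferred from the\n",
"# os.path.join() calls):\n",
"#\n",
"# /srv/satnogs/data/txmodes/GMSK/\n",
"#   train/\n",
"#     good/\n",
"#     bad/\n",
"#   val/\n",
"#     good/\n",
"#     bad/"
]
},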
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('total training good images:', num_train_good)\n",
"print('total training bad images:', num_train_bad)\n",
"#print(\"--\")\n",
"print(\"Total training images:\", total_train)\n",
"#print('total validation good images:', num_val_good)\n",
"#print('total validation bad images:', num_val_bad)\n",
"#print(\"--\")\n",
"#print(\"Total validation images:\", total_val)\n",
"#print(\"Reduce training and validation set when testing\")\n",
"total_train = 100\n",
"total_val = 100\n",
"#print(\"Train =\")\n",
"#print(total_train)\n",
"#print(\"Validation =\")\n",
"#print(total_val)"
]
},
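{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Added sanity check: with total_train and total_val capped at 100 above and\n",
"# batch_size = 128, the integer divisions used for steps_per_epoch and\n",
"# validation_steps below come out to 0, so either the cap or the batch size\n",
"# needs adjusting before training.\n",
"print('steps_per_epoch  =', total_train // batch_size)\n",
"print('validation_steps =', total_val // batch_size)"
]
},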
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_image_generator = ImageDataGenerator( rescale=1./255 )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_image_generator = ImageDataGenerator( rescale=1./255 )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=train_dir,\n",
" shuffle=True,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=val_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_train_images, _ = next(train_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_val_images, _ = next(val_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
"def plotImages(images_arr):\n",
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
" axes = axes.flatten()\n",
" for img, ax in zip( images_arr, axes):\n",
" ax.imshow(img)\n",
" ax.axis('off')\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_train_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_val_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%load_ext tensorboard\n",
"!rm -rf ./logs/\n",
"os.mkdir(\"logs\")\n",
"log_dir = \"logs\"\n",
"#log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=1, update_freq='batch')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = Sequential([\n",
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
" MaxPooling2D(),\n",
" Conv2D(32, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Conv2D(64, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Flatten(),\n",
" Dense(512, activation='relu'),\n",
" Dense(1, activation='sigmoid')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#wutoptimizer = 'adam'\n",
"wutoptimizer = tensorflow.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True)\n",
"wutloss = 'binary_crossentropy'\n",
"#wutmetrics = 'accuracy'\n",
"wutmetrics = ['accuracy']\n",
"model.compile(optimizer=wutoptimizer,\n",
" loss=wutloss,\n",
" metrics=[wutmetrics])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image.LOAD_TRUNCATED_IMAGES = True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%tensorboard --logdir logs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"history = model.fit(\n",
" train_data_gen,\n",
" steps_per_epoch=total_train // batch_size,\n",
" epochs=epochs,\n",
" verbose=1,\n",
" callbacks=[tensorboard_callback],\n",
" validation_data=val_data_gen,\n",
" validation_steps=total_val // batch_size,\n",
" shuffle=True,\n",
" use_multiprocessing=False\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"acc = history.history['accuracy']\n",
"val_acc = history.history['val_accuracy']\n",
"\n",
"loss = history.history['loss']\n",
"val_loss = history.history['val_loss']\n",
"\n",
"epochs_range = range(epochs)\n",
"\n",
"plt.figure(figsize=(8, 8))\n",
"plt.subplot(1, 2, 1)\n",
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
"plt.legend(loc='lower right')\n",
"plt.title('Training and Validation Accuracy')\n",
"\n",
"plt.subplot(1, 2, 2)\n",
"plt.plot(epochs_range, loss, label='Training Loss')\n",
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
"plt.legend(loc='upper right')\n",
"plt.title('Training and Validation Loss')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"TRAINING info\")\n",
"print(train_dir)\n",
"print(train_good_dir)\n",
"print(train_bad_dir)\n",
"print(train_image_generator)\n",
"print(train_data_gen)\n",
"#print(sample_train_images)\n",
"print(history)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save('/srv/satnogs/data/models/GMSK/wut-GMSK-201912.h5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save('/srv/satnogs/data/models/GMSK/wut-GMSK-201912.tf')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.utils import plot_model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plot_model(model, show_shapes=True, show_layer_names=True, expand_nested=True, dpi=72, to_file='/srv/satnogs/data/models/GMSK/plot_model.png')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#from IPython.display import SVG\n",
"#from tensorflow.keras.utils import model_to_dot\n",
"#SVG(model_to_dot(model).create(prog='dot', format='svg'))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

774 notebooks/wut.ipynb 100644

@@ -0,0 +1,774 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# wut --- What U Think? SatNOGS Observation AI.\n",
"#\n",
"# https://spacecruft.org/spacecruft/satnogs-wut"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GPLv3+"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Built using Jupyter, Tensorflow, Keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Other data to consider adding:\n",
"# * JSON metadata\n",
"# * TLE\n",
"# * Audio File (ogg)\n",
"# https://www.tensorflow.org/io/api_docs/python/tfio/ffmpeg/AudioDataset\n",
"# * Decoded Data (HEX, ASCII, PNG)\n",
"# Data from external sources to consider adding:\n",
"# * Weather"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Start\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow.python.keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.python.keras import Sequential\n",
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
"from tensorflow.python.keras import optimizers\n",
"from tensorflow.python.keras.preprocessing import image\n",
"from tensorflow.python.keras.models import load_model\n",
"from tensorflow.python.keras.preprocessing.image import load_img\n",
"from tensorflow.python.keras.preprocessing.image import img_to_array"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.python.keras.models import Model\n",
"from tensorflow.python.keras.layers import Input, concatenate"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualization\n",
"%matplotlib inline\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from sklearn.decomposition import PCA"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Seaborn pip dependency\n",
"import seaborn as sns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Interact\n",
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
"from __future__ import print_function\n",
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
"import ipywidgets as widgets"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Display Images\n",
"from IPython.display import display, Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Python import done\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dir = os.path.join('data/', 'train')\n",
"val_dir = os.path.join('data/', 'val')\n",
"test_dir = os.path.join('data/', 'test')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_good_dir = os.path.join(train_dir, 'good')\n",
"train_bad_dir = os.path.join(train_dir, 'bad')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_good_dir = os.path.join(val_dir, 'good')\n",
"val_bad_dir = os.path.join(val_dir, 'bad')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_train_good = len(os.listdir(train_good_dir))\n",
"num_train_bad = len(os.listdir(train_bad_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_val_good = len(os.listdir(val_good_dir))\n",
"num_val_bad = len(os.listdir(val_bad_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_test = len(os.listdir(test_dir))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"total_train = num_train_good + num_train_bad\n",
"total_val = num_val_good + num_val_bad"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('total training good images:', num_train_good)\n",
"print('total training bad images:', num_train_bad)\n",
"print(\"--\")\n",
"print(\"Total training images:\", total_train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('total validation good images:', num_val_good)\n",
"print('total validation bad images:', num_val_bad)\n",
"print(\"--\")\n",
"print(\"Total validation images:\", total_val)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Reduce training and validation set when testing\")\n",
"#total_train = 500\n",
"#total_val = 500\n",
"print(\"Train =\")\n",
"print(total_train)\n",
"print(\"Validation =\")\n",
"print(total_val)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Good results\n",
"#batch_size = 128\n",
"#epochs = 6\n",
"#\n",
"# Testing, faster more inaccurate results\n",
"batch_size = 96\n",
"epochs = 3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Half size\n",
"IMG_HEIGHT = 416\n",
"IMG_WIDTH= 804\n",
"# Full size, machine barfs probably needs more RAM\n",
"#IMG_HEIGHT = 832\n",
"#IMG_WIDTH = 1606"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_image_generator = ImageDataGenerator(\n",
" rescale=1./255\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=train_dir,\n",
" shuffle=True,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=val_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(test_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,\n",
" directory=test_dir,\n",
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
" class_mode='binary')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_train_images, _ = next(train_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_val_images, _ = next(val_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample_test_images, _ = next(test_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
"def plotImages(images_arr):\n",
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
" axes = axes.flatten()\n",
" for img, ax in zip( images_arr, axes):\n",
" ax.imshow(img)\n",
" ax.axis('off')\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_train_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_val_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plotImages(sample_test_images[0:3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = Sequential([\n",
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
" MaxPooling2D(),\n",
" Conv2D(32, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Conv2D(64, 3, padding='same', activation='relu'),\n",
" MaxPooling2D(),\n",
" Flatten(),\n",
" Dense(512, activation='relu'),\n",
" Dense(1, activation='sigmoid')\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.compile(optimizer='adam',\n",
" loss='binary_crossentropy',\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Image.LOAD_TRUNCATED_IMAGES = True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"history = model.fit_generator(\n",
" train_data_gen,\n",
" steps_per_epoch=total_train // batch_size,\n",
" epochs=epochs,\n",
" validation_data=val_data_gen,\n",
" validation_steps=total_val // batch_size,\n",
" verbose=1\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"acc = history.history['accuracy']\n",
"val_acc = history.history['val_accuracy']\n",
"\n",
"loss = history.history['loss']\n",
"val_loss = history.history['val_loss']\n",
"\n",
"epochs_range = range(epochs)\n",
"\n",
"plt.figure(figsize=(8, 8))\n",
"plt.subplot(1, 2, 1)\n",
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
"plt.legend(loc='lower right')\n",
"plt.title('Training and Validation Accuracy')\n",
"\n",
"plt.subplot(1, 2, 2)\n",
"plt.plot(epochs_range, loss, label='Training Loss')\n",
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
"plt.legend(loc='upper right')\n",
"plt.title('Training and Validation Loss')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"TRAINING info\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_good_dir)\n",
"print(train_bad_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_image_generator)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(train_data_gen)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#print(sample_train_images)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(history)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save('data/wut-CW.h5')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# https://keras.io/models/sequential/\n",
"print(\"predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pred=model.predict_generator(test_data_gen,\n",
"steps=1,\n",
"verbose=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction = model.predict(\n",
" x=test_data_gen,\n",
" verbose=2\n",
")\n",
"print(\"end predict\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions=[]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Show prediction score\n",
"print(prediction)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"prediction_bool = (prediction >0.8)\n",
"print(prediction_bool)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"predictions = prediction_bool.astype(int)\n",
"print(predictions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Make final prediction\n",
"# XXX, display name, display all of them with mini waterfall, etc.\n",
"if prediction_bool[0] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if prediction_bool[1] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if prediction_bool[2] == False:\n",
" rating = 'bad'\n",
"else:\n",
" rating = 'good'\n",
"print('Observation: %s' % (rating))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The End"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}