From 0213f93029245a010813603968d56cb65df1fe6f Mon Sep 17 00:00:00 2001
From: ml server
Date: Thu, 30 Jan 2020 21:33:21 -0700
Subject: [PATCH] some cleanup, tweaks

---
 notebooks/wut-train.ipynb | 233 ++++++++------------------------------
 1 file changed, 48 insertions(+), 185 deletions(-)

diff --git a/notebooks/wut-train.ipynb b/notebooks/wut-train.ipynb
index f44cd5c..cd34aac 100644
--- a/notebooks/wut-train.ipynb
+++ b/notebooks/wut-train.ipynb
@@ -21,43 +21,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "print(\"Start\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "from __future__ import print_function\n",
 "import os\n",
- "import datetime"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import tensorflow.python.keras"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "import datetime\n",
+ "import numpy as np\n",
+ "import tensorflow.python.keras\n",
 "from tensorflow.python.keras import Sequential\n",
 "from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
 "from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
@@ -66,70 +34,21 @@
 "from tensorflow.python.keras.preprocessing import image\n",
 "from tensorflow.python.keras.models import load_model\n",
 "from tensorflow.python.keras.preprocessing.image import load_img\n",
- "from tensorflow.python.keras.preprocessing.image import img_to_array"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "from tensorflow.python.keras.preprocessing.image import img_to_array\n",
+ "from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
 "from tensorflow.python.keras.models import Model\n",
- "from tensorflow.python.keras.layers import Input, concatenate"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "from tensorflow.python.keras.layers import Input, concatenate\n",
 "# Visualization\n",
 "%matplotlib inline\n",
 "import matplotlib.pyplot as plt\n",
 "import numpy as np\n",
- "from sklearn.decomposition import PCA"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "from sklearn.decomposition import PCA\n",
 "# Seaborn pip dependency\n",
- "import seaborn as sns"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "import seaborn as sns\n",
 "# Interact\n",
 "# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
- "from __future__ import print_function\n",
 "from ipywidgets import interact, interactive, fixed, interact_manual\n",
- "import ipywidgets as widgets"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "import ipywidgets as widgets\n",
 "# Display Images\n",
 "from IPython.display import display, Image"
 ]
@@ -140,16 +59,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "print(\"Python import done\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "ENCODING='GMSK'"
+ "ENCODING='GMSK'\n",
+ "batch_size = 128\n",
+ "epochs = 4\n",
+ "IMG_WIDTH = 416\n",
+ "IMG_HEIGHT = 803"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
+ "train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING )\n",
 "train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'train')\n",
- "val_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'val')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "val_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'val')\n",
 "train_good_dir = os.path.join(train_dir, 'good')\n",
- "train_bad_dir = os.path.join(train_dir, 'bad')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "train_bad_dir = os.path.join(train_dir, 'bad')\n",
 "val_good_dir = os.path.join(val_dir, 'good')\n",
- "val_bad_dir = os.path.join(val_dir, 'bad')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "val_bad_dir = os.path.join(val_dir, 'bad')\n",
 "num_train_good = len(os.listdir(train_good_dir))\n",
- "num_train_bad = len(os.listdir(train_bad_dir))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "num_train_bad = len(os.listdir(train_bad_dir))\n",
 "num_val_good = len(os.listdir(val_good_dir))\n",
- "num_val_bad = len(os.listdir(val_bad_dir))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
+ "num_val_bad = len(os.listdir(val_bad_dir))\n",
 "total_train = num_train_good + num_train_bad\n",
 "total_val = num_val_good + num_val_bad"
 ]
 },
 {
@@ -220,39 +95,19 @@
 "source": [
 "print('total training good images:', num_train_good)\n",
 "print('total training bad images:', num_train_bad)\n",
- "print(\"--\")\n",
+ "#print(\"--\")\n",
 "print(\"Total training images:\", total_train)\n",
- "print('total validation good images:', num_val_good)\n",
- "print('total validation bad images:', num_val_bad)\n",
- "print(\"--\")\n",
- "print(\"Total validation images:\", total_val)\n",
- "print(\"Reduce training and validation set when testing\")\n",
- "#total_train = 100\n",
- "#total_val = 100\n",
- "print(\"Train =\")\n",
- "print(total_train)\n",
- "print(\"Validation =\")\n",
- "print(total_val)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "batch_size = 128\n",
- "epochs = 16"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "IMG_WIDTH = 416\n",
- "IMG_HEIGHT = 803"
+ "#print('total validation good images:', num_val_good)\n",
+ "#print('total validation bad images:', num_val_bad)\n",
+ "#print(\"--\")\n",
+ "#print(\"Total validation images:\", total_val)\n",
+ "#print(\"Reduce training and validation set when testing\")\n",
+ "total_train = 100\n",
+ "total_val = 100\n",
+ "#print(\"Train =\")\n",
+ "#print(total_train)\n",
+ "#print(\"Validation =\")\n",
+ "#print(total_val)"
 ]
 },
 {
@@ -363,7 +218,8 @@
 "log_dir = \"logs\"\n",
 "#log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
 "#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
- "tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)"
tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)" + "#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n", + "tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=1, update_freq='batch')" ] }, { @@ -391,9 +247,14 @@ "metadata": {}, "outputs": [], "source": [ - "model.compile(optimizer='adam',\n", - " loss='binary_crossentropy',\n", - " metrics=['accuracy'])" + "#wutoptimizer = 'adam'\n", + "wutoptimizer = tensorflow.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True)\n", + "wutloss = 'binary_crossentropy'\n", + "#wutmetrics = 'accuracy'\n", + "wutmetrics = ['accuracy']\n", + "model.compile(optimizer=wutoptimizer,\n", + " loss=wutloss,\n", + " metrics=[wutmetrics])" ] }, { @@ -429,14 +290,16 @@ "metadata": {}, "outputs": [], "source": [ - "history = model.fit_generator(\n", + "history = model.fit(\n", " train_data_gen,\n", " steps_per_epoch=total_train // batch_size,\n", " epochs=epochs,\n", + " verbose=1,\n", + " callbacks=[tensorboard_callback],\n", " validation_data=val_data_gen,\n", " validation_steps=total_val // batch_size,\n", - " callbacks=[tensorboard_callback],\n", - " verbose=1\n", + " shuffle=True,\n", + " use_multiprocessing=False\n", ")" ] },