Compare commits
127 Commits
Author | SHA1 | Date |
---|---|---|
Jeff Moe | 7195382398 | |
Jeff Moe | 22fbe4073a | |
Jeff Moe | ac2ae3f1cf | |
Jeff Moe | fb236fcf30 | |
Jeff Moe | 8aee0ac624 | |
Jeff Moe | 8dd443ab30 | |
Jeff Moe | 33adccb2cb | |
Jeff Moe | ec25b09b24 | |
Jeff Moe | 3068a39e3a | |
Jeff Moe | 827b950a14 | |
Jeff Moe | 1fbad10405 | |
Jeff Moe | 323ceda7eb | |
Jeff Moe | b07a53458e | |
Jeff Moe | 8feeb6c896 | |
Jeff Moe | 6be11cb7aa | |
Jeff Moe | 3f6434f6dd | |
Jeff Moe | 73fbdf43da | |
Jeff Moe | 20d96f0a61 | |
Jeff Moe | fa3d0b0284 | |
Jeff Moe | 2c86557fcb | |
Jeff Moe | 35dc603832 | |
Jeff Moe | 16956df5ca | |
Jeff Moe | a094fea6bf | |
Jeff Moe | 8a5f8fe070 | |
Jeff Moe | 5e987198bc | |
Jeff Moe | b6ac03590a | |
Jeff Moe | 270178d027 | |
Jeff Moe | 1517670e7c | |
Jeff Moe | 4d175ce254 | |
Jeff Moe | e63c52299b | |
Jeff Moe | 2d7f366ecc | |
root | 16226ae82e | |
root | a3bde31b09 | |
root | c960a49b3f | |
root | 75a111c871 | |
root | 452d8ab0c9 | |
root | b58edb583e | |
root | 6907baeb55 | |
root | 324a9b8d02 | |
root | a9343fe66a | |
root | 067605de08 | |
root | 32322b7f15 | |
root | 94fd593865 | |
root | b9600fab49 | |
root | b07f9d4e0f | |
root | 7d4e3d445e | |
root | ebf16d1893 | |
root | 9c6ab722e5 | |
root | 8c7b80083b | |
root | 34feced97b | |
root | 9fdf801eb7 | |
root | 9c26f46464 | |
root | b70aadee1a | |
root | 6cdb5a304e | |
root | 8cdf0970c9 | |
root | c9c3398e9f | |
Jeff Moe | 65037f21d1 | |
Jeff Moe | a1275b77f9 | |
Jeff Moe | 87234b88c6 | |
Jeff Moe | b7c1712853 | |
jebba | b52c07e8ed | |
jebba | cb7150826f | |
jebba | 0415b186a7 | |
server | 653f9d0115 | |
ml server | 14d07b9c6c | |
ml server | d33cfbdedd | |
ml server | 5a91662237 | |
server | b828430d37 | |
server | 18bec38353 | |
server | a2360bd109 | |
server | 98f355fb05 | |
server | 3eab82b19e | |
server | 0d25371eac | |
ml server | 3f3f8d3af2 | |
ml server | d458c7236d | |
ml server | cee5d151d9 | |
ml server | d1edfd4ace | |
ml server | 2cb732eac7 | |
ml server | 1e1b147740 | |
ml server | 8d237bd2ba | |
ml server | 6ea9bce41c | |
ml server | c3d3612da1 | |
ml server | d48a041e83 | |
ml server | 0213f93029 | |
ml server | 4d23a231b0 | |
ml server | d96585e514 | |
ml server | 7eb8676f11 | |
ml server | 298348aad0 | |
jebba | 5d6b0be4ab | |
ml server | ca0ad42851 | |
ml server | 0bf1519409 | |
ml server | 5c921438b8 | |
ml server | d8c685e58c | |
ml server | 81528c6044 | |
ml server | 983157846c | |
ml server | a74bd74a4d | |
ml server | 1c67889a03 | |
ml server | 05715d19b5 | |
ml server | 6f3b4daab4 | |
ml server | 54b53d217a | |
ml server | 6b72e87dfc | |
ml server | 33923b906f | |
ml server | 0865b28c66 | |
ml server | 3442bffdf2 | |
ml server | f17c2c3672 | |
ml server | 469efb2050 | |
ml server | 30675bc5ca | |
ml server | 2c3d73fd41 | |
ml server | 86584c1f27 | |
ml server | 3eef31079b | |
ml server | 8d302f4b00 | |
ml server | 68c30f255b | |
ml server | 5c083de35b | |
ml server | d56603c7f9 | |
ml server | c93fb9df76 | |
ml server | 32b8d2d521 | |
ml server | d102e6dacf | |
ml server | 777c8b6106 | |
ml server | 3f4dc0a815 | |
ml server | 6486b1d0b3 | |
ml server | 85e047e46c | |
ml server | bb6b54ce81 | |
ml server | f06d83db73 | |
ml server | d6866fa547 | |
ml server | 1d3a0b10f2 | |
ml server | 5c51e9f86d | |
ml server | 3cfb0fd0be |
|
@ -5,3 +5,8 @@ dump*.json
|
||||||
preprocess/
|
preprocess/
|
||||||
tmp/
|
tmp/
|
||||||
.ipynb_checkpoints/
|
.ipynb_checkpoints/
|
||||||
|
notebooks/logs/
|
||||||
|
.~lock.*#
|
||||||
|
log/
|
||||||
|
notebooks/model.png
|
||||||
|
bin/
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
# Makefile
|
||||||
|
|
||||||
|
prefix = /usr/local
|
||||||
|
bindir = $(prefix)/bin
|
||||||
|
|
||||||
|
all:
|
||||||
|
$(MAKE) -C src
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -fr bin/
|
||||||
|
|
||||||
|
install:
|
||||||
|
@cp -vp bin/* $(bindir)/
|
||||||
|
|
||||||
|
uninstall:
|
||||||
|
@rm -vf \
|
||||||
|
$(bindir)/wut \
|
||||||
|
$(bindir)/wut-aria-active \
|
||||||
|
$(bindir)/wut-aria-add \
|
||||||
|
$(bindir)/wut-aria-daemon \
|
||||||
|
$(bindir)/wut-aria-info \
|
||||||
|
$(bindir)/wut-aria-methods \
|
||||||
|
$(bindir)/wut-aria-shutdown \
|
||||||
|
$(bindir)/wut-aria-stat \
|
||||||
|
$(bindir)/wut-aria-stopped \
|
||||||
|
$(bindir)/wut-aria-waiting \
|
||||||
|
$(bindir)/wut-audio-archive \
|
||||||
|
$(bindir)/wut-audio-sha1 \
|
||||||
|
$(bindir)/wut-compare \
|
||||||
|
$(bindir)/wut-compare-all \
|
||||||
|
$(bindir)/wut-compare-tx \
|
||||||
|
$(bindir)/wut-compare-txmode \
|
||||||
|
$(bindir)/wut-compare-txmode-csv \
|
||||||
|
$(bindir)/wut-dl-sort \
|
||||||
|
$(bindir)/wut-dl-sort-tx \
|
||||||
|
$(bindir)/wut-dl-sort-txmode \
|
||||||
|
$(bindir)/wut-dl-sort-txmode-all \
|
||||||
|
$(bindir)/wut-files \
|
||||||
|
$(bindir)/wut-files-data \
|
||||||
|
$(bindir)/wut-files-data-all \
|
||||||
|
$(bindir)/wut-ia-sha1 \
|
||||||
|
$(bindir)/wut-ia-torrents \
|
||||||
|
$(bindir)/wut-img-ck.py \
|
||||||
|
$(bindir)/wut-ml \
|
||||||
|
$(bindir)/wut-ml-auto \
|
||||||
|
$(bindir)/wut-ml-load \
|
||||||
|
$(bindir)/wut-ml-save \
|
||||||
|
$(bindir)/wut-obs \
|
||||||
|
$(bindir)/wut-ogg2wav \
|
||||||
|
$(bindir)/wut-review-staging \
|
||||||
|
$(bindir)/wut-rm-random \
|
||||||
|
$(bindir)/wut-tf \
|
||||||
|
$(bindir)/wut-tf.py \
|
||||||
|
$(bindir)/wut-water \
|
||||||
|
$(bindir)/wut-water-range \
|
||||||
|
$(bindir)/wut-worker \
|
||||||
|
$(bindir)/wut-worker-mas \
|
||||||
|
$(bindir)/wut-worker-mas.py \
|
||||||
|
$(bindir)/wut-worker.py \
|
||||||
|
|
|
@ -1,79 +0,0 @@
|
||||||
# Distributed Computing
|
|
||||||
HOWTO Set up and run Tensorflow on multiple nodes.
|
|
||||||
This documentation is specific to this particular configuration.
|
|
||||||
|
|
||||||
# Software
|
|
||||||
Main software in use:
|
|
||||||
|
|
||||||
* Debian
|
|
||||||
* Proxmox
|
|
||||||
* Ceph
|
|
||||||
* Python 3
|
|
||||||
* Tensorflow
|
|
||||||
* Jupyter
|
|
||||||
* `clusterssh`
|
|
||||||
|
|
||||||
# Installation
|
|
||||||
Major steps.
|
|
||||||
|
|
||||||
1. Install Proxmox on bare metal.
|
|
||||||
1. Clone Debian KVM Nodes.
|
|
||||||
1. Set up nodes.
|
|
||||||
1. Install Tensorflow.
|
|
||||||
1. Set up Ceph.
|
|
||||||
|
|
||||||
## Proxmox
|
|
||||||
Setting up Proxmox is outside the scope of this document.
|
|
||||||
All you really need is some virtual machines, however
|
|
||||||
they are created.
|
|
||||||
|
|
||||||
* https://www.proxmox.com/en/proxmox-ve
|
|
||||||
|
|
||||||
## Set up nodes
|
|
||||||
```
|
|
||||||
# On main workstation or node where you built tensorflow:
|
|
||||||
NODES="ml1 ml2 ml3 ml4 ml5"
|
|
||||||
for i in $NODES
|
|
||||||
do scp -p tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl $i:
|
|
||||||
done
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
# On worker nodes:
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install python3-pip sshfs
|
|
||||||
# XXX deps...
|
|
||||||
pip3 install --upgrade setuptools
|
|
||||||
pip3 install --user tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl
|
|
||||||
pip3 install --user simplejson
|
|
||||||
pip3 install --user pillow
|
|
||||||
```
|
|
||||||
|
|
||||||
Another way, using upstream tensorflow packages.
|
|
||||||
You also have to install the latest `pip` from `pip`,
|
|
||||||
or you'll get `tensorflow 1.x`.
|
|
||||||
```
|
|
||||||
pip3 install pip
|
|
||||||
pip3 install --upgrade pip
|
|
||||||
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
|
||||||
# install tensorflow
|
|
||||||
pip3 install --user tensorflow
|
|
||||||
# If that fails due to the PATH, run like:
|
|
||||||
~/.local/bin/pip3 install --user tensorflow
|
|
||||||
pip3 list | grep tensorflow
|
|
||||||
# There's a bunch of tests that can be run, such as:
|
|
||||||
python3 ~/devel/tensorflow/tensorflow/tensorflow/python/distribute/multi_worker_continuous_run_test.py
|
|
||||||
```
|
|
||||||
|
|
||||||
# Usage
|
|
||||||
`top`
|
|
||||||
|
|
||||||
# Meh
|
|
||||||
```
|
|
||||||
# for running some tensorflow tests:
|
|
||||||
pip3 install --user portpicker
|
|
||||||
# For other examples/tests:
|
|
||||||
#pip3 install --user opencv-python
|
|
||||||
apt install python3-opencv
|
|
||||||
pip3 install --user pandas
|
|
||||||
```
|
|
|
@ -1,58 +0,0 @@
|
||||||
# Voila
|
|
||||||
|
|
||||||
Voila is a way to turn Jupyter notebooks into web applications.
|
|
||||||
|
|
||||||
# Install
|
|
||||||
Start with basic Debian Buster install.
|
|
||||||
|
|
||||||
```
|
|
||||||
# set up partitions
|
|
||||||
# XXX deps...
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install apache2 python3-certbot-apache python3-pip sshfs
|
|
||||||
certbot
|
|
||||||
systemctl restart apache2
|
|
||||||
adduser wut
|
|
||||||
sudo su - wut
|
|
||||||
pip3 install --upgrade pip
|
|
||||||
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
|
||||||
logout #log back in
|
|
||||||
sudo su - wut
|
|
||||||
pip3 install --user --upgrade setuptools
|
|
||||||
pip3 install --user simplejson
|
|
||||||
pip3 install --user pillow
|
|
||||||
pip3 install --user tensorflow
|
|
||||||
pip3 list | grep tensorflow
|
|
||||||
pip3 install --user voila
|
|
||||||
pip3 install --user ipywidgets
|
|
||||||
pip3 install --user matplotlib
|
|
||||||
# probably not needed
|
|
||||||
pip3 install --user sklearn
|
|
||||||
pip3 install --user seaborn
|
|
||||||
# set up hosts file, network, etc.
|
|
||||||
# set up apache proxy
|
|
||||||
# start voila
|
|
||||||
```
|
|
||||||
|
|
||||||
# Misc
|
|
||||||
Other dependencies that are perhaps used or needed.
|
|
||||||
|
|
||||||
* https://ipywidgets.readthedocs.io/en/stable/user_install.html
|
|
||||||
|
|
||||||
To enable Jupyter widgets:
|
|
||||||
|
|
||||||
```
|
|
||||||
apt install npm nodejs
|
|
||||||
jupyter nbextension enable --py widgetsnbextension
|
|
||||||
jupyter labextension install @jupyter-widgets/jupyterlab-manager
|
|
||||||
```
|
|
||||||
Misc:
|
|
||||||
```
|
|
||||||
pip3 install --user ipython_blocking
|
|
||||||
```
|
|
||||||
|
|
||||||
# wut?
|
|
||||||
Site:
|
|
||||||
|
|
||||||
* https://wut.spacecruft.org
|
|
||||||
|
|
304
README.md
304
README.md
|
@ -7,60 +7,77 @@ Website:
|
||||||
|
|
||||||
|
|
||||||
# satnogs-wut
|
# satnogs-wut
|
||||||
<div>
|
![Image](pics/wut-web.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/wut-web.png" width="600"/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
The goal of satnogs-wut is to have a script that will take an
|
The goal of satnogs-wut is to have a script that will take an
|
||||||
observation ID and return an answer whether the observation is
|
observation ID and return an answer whether the observation is
|
||||||
"good", "bad", or "failed".
|
"good", "bad", or "failed".
|
||||||
|
|
||||||
## Good Observation
|
## Good Observation
|
||||||
<div>
|
![Image](pics/waterfall-good.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-good.png" width="300"/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
## Bad Observation
|
## Bad Observation
|
||||||
<div>
|
![Image](pics/waterfall-bad.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-bad.png" width="300"/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
## Failed Observation
|
## Failed Observation
|
||||||
<div>
|
![Image](pics/waterfall-failed.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-failed.png" width="300"/>
|
|
||||||
</div>
|
## wut Web
|
||||||
|
Main site:
|
||||||
|
* https://wut.spacecruft.org/
|
||||||
|
|
||||||
|
Source code:
|
||||||
|
* https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
|
|
||||||
|
Beta (test) site:
|
||||||
|
|
||||||
|
* https://wut-beta.spacecruft.org/
|
||||||
|
|
||||||
|
Alpha (development) site:
|
||||||
|
|
||||||
|
* https://wut-alpha.spacecruft.org/
|
||||||
|
|
||||||
## Observations
|
## Observations
|
||||||
See also:
|
See also:
|
||||||
|
|
||||||
* https://wiki.satnogs.org/Operation
|
* https://wiki.satnogs.org/Operation
|
||||||
* https://wiki.satnogs.org/Rating_Observations
|
* https://wiki.satnogs.org/Observe
|
||||||
* https://wiki.satnogs.org/Taxonomy_of_Observations
|
* https://wiki.satnogs.org/Observations
|
||||||
|
* https://wiki.satnogs.org/Category:RF_Modes
|
||||||
* Sample observation: https://network.satnogs.org/observations/1456893/
|
* Sample observation: https://network.satnogs.org/observations/1456893/
|
||||||
|
|
||||||
# Machine Learning
|
# Machine Learning
|
||||||
The system at present is built upon the following:
|
The system at present is built upon the following:
|
||||||
|
|
||||||
* Debian Buster.
|
* Debian Bookworm (testing/12).
|
||||||
* Tensorflow 2.1 with built-in Keras.
|
* Tensorflow.
|
||||||
* Jupyter Lab.
|
* Jupyter Lab.
|
||||||
|
* Voila.
|
||||||
|
|
||||||
Learning/testing, results are ~~inaccurate~~ getting closer.
|
Learning/testing, results are good.
|
||||||
The main AI/ML development is now being done in Jupyter.
|
The main AI/ML development is being done in Jupyter.
|
||||||
|
|
||||||
# Jupyter
|
# Jupyter
|
||||||
There is a Jupyter Lab Notebook file.
|
There are Jupyter Lab Notebook files in the `notebooks/` subdirectory.
|
||||||
This is producing real results at present, but has a long ways to go still...
|
These are producing usable results. Voila is used to convert
|
||||||
|
Jupyter notebooks into websites.
|
||||||
|
|
||||||
|
* `wut.ipynb` --- Machine learning Python script using Tensorflow and Keras in a Jupyter Notebook.
|
||||||
|
* `wut-predict.ipynb` --- Make prediction (rating) of observation from pre-existing model.
|
||||||
|
* `wut-train.ipynb` --- Train models to be used by the prediction engine.
|
||||||
|
* `wut-web.ipynb` --- Website: https://wut.spacecruft.org/
|
||||||
|
* `wut-web-beta.ipynb` --- Website: https://wut-beta.spacecruft.org/
|
||||||
|
* `wut-web-alpha.ipynb` --- Website: https://wut-alpha.spacecruft.org/
|
||||||
|
|
||||||
* `wut-ml.ipynb` --- Machine learning Python script using Tensorflow and Keras in a Jupyter Notebook.
|
|
||||||
* `wut-predict.ipynb` --- Make prediction (rating) of observation, using `data/wut.h5`.
|
|
||||||
* `wut-train.ipynb` --- ML Training file saved to `data/wut.h5`.
|
|
||||||
|
|
||||||
# wut scripts
|
# wut scripts
|
||||||
The following scripts are in the repo:
|
The following scripts are in the repo.
|
||||||
|
|
||||||
* `wut` --- Feed it an observation ID and it returns if it is a "good", "bad", or "failed" observation.
|
* `wut` --- Feed it an observation ID and it returns if it is a "good", "bad", or "failed" observation.
|
||||||
|
* `wut-aria-add` --- Add a torrent from the Internet Archive to the aria daemon for downloading.
|
||||||
|
* `wut-aria-daemon` --- Run an aria daemon for torrent downloads from the Internet Archive.
|
||||||
* `wut-audio-archive` --- Downloads audio files from archive.org.
|
* `wut-audio-archive` --- Downloads audio files from archive.org.
|
||||||
|
* `wut-audio-sha1` --- Verifies sha1 checksums of files downloaded from archive.org.
|
||||||
* `wut-compare` --- Compare an observations' current presumably human vetting with a `wut` vetting.
|
* `wut-compare` --- Compare an observations' current presumably human vetting with a `wut` vetting.
|
||||||
* `wut-compare-all` --- Compare all the observations in `download/` with `wut` vettings.
|
* `wut-compare-all` --- Compare all the observations in `download/` with `wut` vettings.
|
||||||
* `wut-compare-tx` --- Compare all the observations in `download/` with `wut` vettings using selected transmitter UUID.
|
* `wut-compare-tx` --- Compare all the observations in `download/` with `wut` vettings using selected transmitter UUID.
|
||||||
|
@ -69,19 +86,34 @@ The following scripts are in the repo:
|
||||||
* `wut-dl-sort` --- Populate `data/` dir with waterfalls from `download/`.
|
* `wut-dl-sort` --- Populate `data/` dir with waterfalls from `download/`.
|
||||||
* `wut-dl-sort-tx` --- Populate `data/` dir with waterfalls from `download/` using selected transmitter UUID.
|
* `wut-dl-sort-tx` --- Populate `data/` dir with waterfalls from `download/` using selected transmitter UUID.
|
||||||
* `wut-dl-sort-txmode` --- Populate `data/` dir with waterfalls from `download/` using selected encoding.
|
* `wut-dl-sort-txmode` --- Populate `data/` dir with waterfalls from `download/` using selected encoding.
|
||||||
|
* `wut-dl-sort-txmode-all` --- Populate `data/` dir with waterfalls from `download/` using all encodings.
|
||||||
* `wut-files` --- Tells you about what files you have in `downloads/` and `data/`.
|
* `wut-files` --- Tells you about what files you have in `downloads/` and `data/`.
|
||||||
|
* `wut-files-data` --- Tells you about what files you have in `data/`.
|
||||||
|
* `wut-ia` --- Download SatNOGS data from the Internet Archive at `archive.org`.
|
||||||
|
* `wut-ia-torrents` --- Download SatNOGS torrents from the Internet Archive at `archive.org`.
|
||||||
|
* `wut-img-ck.py` --- Validate image files are not corrupt with PIL.
|
||||||
* `wut-ml` --- Main machine learning Python script using Tensorflow and Keras.
|
* `wut-ml` --- Main machine learning Python script using Tensorflow and Keras.
|
||||||
|
* `wut-ml-auto` --- Machine learning Python script using Tensorflow and Keras, auto.
|
||||||
* `wut-ml-load` --- Machine learning Python script using Tensorflow and Keras, load `data/wut.h5`.
|
* `wut-ml-load` --- Machine learning Python script using Tensorflow and Keras, load `data/wut.h5`.
|
||||||
* `wut-ml-save` --- Machine learning Python script using Tensorflow and Keras, save `data/wut.h5`.
|
* `wut-ml-save` --- Machine learning Python script using Tensorflow and Keras, save `data/wut.h5`.
|
||||||
* `wut-obs` --- Download the JSON for an observation ID.
|
* `wut-obs` --- Download the JSON for an observation ID.
|
||||||
* `wut-ogg2wav` --- Convert `.ogg` files in `downloads/` to `.wav` files.
|
* `wut-ogg2wav` --- Convert `.ogg` files in `downloads/` to `.wav` files.
|
||||||
|
* `wut-rm-random` --- Randomly deletes stuff. Very bad.
|
||||||
* `wut-review-staging` --- Review all images in `data/staging`.
|
* `wut-review-staging` --- Review all images in `data/staging`.
|
||||||
|
* `wut-tf` --- Shell script to set variables when launching `wut-tf.py`.
|
||||||
|
* `wut-tf.py` --- Distributed learning script to be run on multiple nodes.
|
||||||
* `wut-water` --- Download waterfall for an observation ID to `download/[ID]`.
|
* `wut-water` --- Download waterfall for an observation ID to `download/[ID]`.
|
||||||
* `wut-water-range` --- Download waterfalls for a range of observation IDs to `download/[ID]`.
|
* `wut-water-range` --- Download waterfalls for a range of observation IDs to `download/[ID]`.
|
||||||
|
* `wut-worker` --- Shell script to set variables when launching `wut-worker.py`.
|
||||||
|
* `wut-worker.py` --- Distributed training script to run on multiple nodes.
|
||||||
|
* `wut-worker-mas` --- Shell script to set variables when launching `wut-worker-mas.py`.
|
||||||
|
* `wut-worker-mas.py` --- Distributed training script to run on multiple nodes, alt version.
|
||||||
|
|
||||||
|
|
||||||
# Installation
|
# Installation
|
||||||
Most of the scripts are simple shell scripts with few dependencies.
|
Installation notes...
|
||||||
|
|
||||||
|
There's more docs on a few different setups in the `docs/` subdir.
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
The scripts use files that are ignored in the git repo.
|
The scripts use files that are ignored in the git repo.
|
||||||
|
@ -100,60 +132,56 @@ mkdir -p data/test/unvetted
|
||||||
```
|
```
|
||||||
|
|
||||||
## Debian Packages
|
## Debian Packages
|
||||||
You'll need `curl` and `jq`, both in Debian's repos.
|
Install dependencies from Debian.
|
||||||
|
|
||||||
```
|
```
|
||||||
apt update
|
sudo apt update
|
||||||
apt install curl jq
|
sudo apt install curl jq python3-pip graphviz
|
||||||
```
|
```
|
||||||
|
|
||||||
## Install Tensorflow
|
## Install Python Packages
|
||||||
For the machine learning scripts, like `wut-ml`, Tensorflow
|
For the machine learning scripts, like `wut-ml`, Tensorflow
|
||||||
needs to be installed.
|
needs to be installed.
|
||||||
As of version 2 of Tensorflow, Keras no longer needs to be
|
|
||||||
installed separately.
|
|
||||||
|
|
||||||
|
|
||||||
The versions of Tensorflow installed with `pip3` on Debian
|
|
||||||
Buster crashes. It is perhaps best to do a custom install,
|
|
||||||
best preferred build options, of the most preferred version.
|
|
||||||
At this point, the `remotes/origin/r2.1` branch is preferred.
|
|
||||||
|
|
||||||
|
|
||||||
To install Tensorflow:
|
|
||||||
|
|
||||||
* https://www.tensorflow.org/install/source
|
|
||||||
|
|
||||||
1. Install dependencies in Debian.
|
|
||||||
|
|
||||||
1. Install Bazel to build Tensorflow.
|
|
||||||
|
|
||||||
1. Build Tensorflow pip package.
|
|
||||||
|
|
||||||
1. Install Tensorflow from custom pip package.
|
|
||||||
|
|
||||||
|
You need to add `~/.local/bin` to your `$PATH`:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Install deps
|
echo 'PATH=~/.local/bin:$PATH' >> ~/.bashrc
|
||||||
apt update
|
```
|
||||||
apt install python3-pip
|
|
||||||
# Install bazel .deb from releases here:
|
Then log out and back in, or reload ala:
|
||||||
firefox https://github.com/bazelbuild/bazel/releases
|
```
|
||||||
# Install Tensorflow
|
. ~/.bashrc
|
||||||
git clone tensorflow...
|
```
|
||||||
cd tensorflow
|
|
||||||
git checkout v2.1.0
|
Update pip to latest pretty version, in local directory.
|
||||||
bazel clean
|
Vary Python package install, suited to taste.
|
||||||
# Get flags to pass:
|
|
||||||
grep flags -m1 /proc/cpuinfo | cut -d ":" -f 2 | tr '[:upper:]' '[:lower:]' | { read FLAGS; OPT="-march=native"; for flag in $FLAGS; do case "$flag" in "sse4_1" | "sse4_2" | "ssse3" | "fma" | "cx16" | "popcnt" | "avx" | "avx2") OPT+=" -m$flag";; esac; done; MODOPT=${OPT//_/\.}; echo "$MODOPT"; }
|
```
|
||||||
./configure
|
pip install --user --upgrade pip
|
||||||
# Run Bazel to build pip package. Takes nearly 2 hours to build.
|
```
|
||||||
bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
|
|
||||||
./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
|
Make sure you have right pip:
|
||||||
pip3 install --user /tmp/tensorflow_pkg/tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl
|
```
|
||||||
|
debian@workstation:~$ which pip
|
||||||
|
/home/debian/.local/bin/pip
|
||||||
|
```
|
||||||
|
|
||||||
|
Install Python packages:
|
||||||
|
```
|
||||||
|
pip install --user --upgrade -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
Make and install `satnogs-wut`:
|
||||||
|
|
||||||
|
```
|
||||||
|
make
|
||||||
|
sudo make install
|
||||||
```
|
```
|
||||||
|
|
||||||
### Tensorflow KVM Notes
|
### Tensorflow KVM Notes
|
||||||
|
Note, for KVM, pass cpu=host if host has "avx" in `/proc/cpuinfo`.
|
||||||
|
|
||||||
Recent versions of Tensorflow can handle many more CPU build options
|
Recent versions of Tensorflow can handle many more CPU build options
|
||||||
to optimize for speed, such as
|
to optimize for speed, such as
|
||||||
[AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
|
[AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
|
||||||
|
@ -165,81 +193,13 @@ For more info about this in Proxmox, see
|
||||||
If you don't have this enabled, CPU instructions will fail or
|
If you don't have this enabled, CPU instructions will fail or
|
||||||
Tensorflow will run slower than it could.
|
Tensorflow will run slower than it could.
|
||||||
|
|
||||||
### Tensor Configuration
|
|
||||||
```
|
|
||||||
$ ./configure
|
|
||||||
WARNING: --batch mode is deprecated. Please instead explicitly shut down your Bazel server using the command "bazel shutdown".
|
|
||||||
You have bazel 0.29.1 installed.
|
|
||||||
Please specify the location of python. [Default is /usr/bin/python3]:
|
|
||||||
|
|
||||||
|
## Jupyter
|
||||||
Found possible Python library paths:
|
|
||||||
/usr/lib/python3/dist-packages
|
|
||||||
/usr/local/lib/python3.7/dist-packages
|
|
||||||
Please input the desired Python library path to use. Default is [/usr/lib/python3/dist-packages]
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with XLA JIT support? [Y/n]:
|
|
||||||
XLA JIT support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
|
|
||||||
No OpenCL SYCL support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with ROCm support? [y/N]:
|
|
||||||
No ROCm support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with CUDA support? [y/N]:
|
|
||||||
No CUDA support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to download a fresh release of clang? (Experimental) [y/N]:
|
|
||||||
Clang will not be downloaded.
|
|
||||||
|
|
||||||
Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native -Wno-sign-compare]: -march=native -mssse3 -mcx16 -msse4.1 -msse4.2 -mpopcnt -mavx
|
|
||||||
|
|
||||||
|
|
||||||
Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
|
|
||||||
Not configuring the WORKSPACE for Android builds.
|
|
||||||
|
|
||||||
Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
|
|
||||||
--config=mkl # Build with MKL support.
|
|
||||||
--config=monolithic # Config for mostly static monolithic build.
|
|
||||||
--config=ngraph # Build with Intel nGraph support.
|
|
||||||
--config=numa # Build with NUMA support.
|
|
||||||
--config=dynamic_kernels # (Experimental) Build kernels into separate shared objects.
|
|
||||||
--config=v2 # Build TensorFlow 2.x instead of 1.x.
|
|
||||||
Preconfigured Bazel build configs to DISABLE default on features:
|
|
||||||
--config=noaws # Disable AWS S3 filesystem support.
|
|
||||||
--config=nogcp # Disable GCP support.
|
|
||||||
--config=nohdfs # Disable HDFS support.
|
|
||||||
--config=nonccl # Disable NVIDIA NCCL support.
|
|
||||||
Configuration finished
|
|
||||||
```
|
|
||||||
|
|
||||||
## KVM
|
|
||||||
Note, for KVM, pass cpu=host if host has "avx" in `/proc/cpuinfo`.
|
|
||||||
|
|
||||||
## Install Jupyter
|
|
||||||
Jupyter is a cute little web interface that makes Python programming
|
Jupyter is a cute little web interface that makes Python programming
|
||||||
easy. It works well for machine learning because you can step through
|
easy. It works well for machine learning because you can step through
|
||||||
just parts of the code, changing variables and immediately seeing
|
just parts of the code, changing variables and immediately seeing
|
||||||
output in the web browser.
|
output in the web browser.
|
||||||
|
|
||||||
Probably installed like this:
|
|
||||||
|
|
||||||
```
|
|
||||||
pip3 install --user jupyterlab
|
|
||||||
# Also other good packages, maybe like:
|
|
||||||
pip3 install --user jupyter-tensorboard
|
|
||||||
pip3 list | grep jupyter
|
|
||||||
# returns:
|
|
||||||
jupyter 1.0.0
|
|
||||||
jupyter-client 5.3.4
|
|
||||||
jupyter-console 6.0.0
|
|
||||||
jupyter-core 4.6.1
|
|
||||||
jupyter-tensorboard 0.1.10
|
|
||||||
jupyterlab 1.2.4
|
|
||||||
jupyterlab-server 1.0.6
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
# Usage
|
# Usage
|
||||||
The main purpose of the script is to evaluate an observation,
|
The main purpose of the script is to evaluate an observation,
|
||||||
|
@ -268,12 +228,6 @@ The following steps need to be performed:
|
||||||
|
|
||||||
1. Rate an observation using the `wut` script.
|
1. Rate an observation using the `wut` script.
|
||||||
|
|
||||||
# ml.spacecruft.org
|
|
||||||
This server is processing the data and has directories available
|
|
||||||
to sync.
|
|
||||||
|
|
||||||
* https://ml.spacecruft.org/
|
|
||||||
|
|
||||||
## Data Caching Downloads
|
## Data Caching Downloads
|
||||||
The scripts are designed to not download a waterfall or make a JSON request
|
The scripts are designed to not download a waterfall or make a JSON request
|
||||||
for an observation it has already requested. The first time an observation
|
for an observation it has already requested. The first time an observation
|
||||||
|
@ -291,64 +245,30 @@ Files in the `preprocess/` directory have been preprocessed to be used
|
||||||
further in the pipeline. This contains `.wav` files that have been
|
further in the pipeline. This contains `.wav` files that have been
|
||||||
decoded from `.ogg` files.
|
decoded from `.ogg` files.
|
||||||
|
|
||||||
|
## Internet Archive Downloads
|
||||||
|
The Internet Archive has a mirror of data from the SatNOGS network.
|
||||||
|
It is better to download from there to save on Libre Space Foundation
|
||||||
|
resources.
|
||||||
|
|
||||||
## SatNOGS Observation Data Mirror
|
* https://archive.org/details/satnogs
|
||||||
The downloaded waterfalls are available below via `http` and `rsync`.
|
|
||||||
Use this instead of downloading from SatNOGS to save their bandwidth.
|
To download, perhaps do something like the following.
|
||||||
|
Get an account at archive.org, then run this to set up your account locally:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Something like:
|
ia configure
|
||||||
wget --mirror https://ml.spacecruft.org/download
|
|
||||||
# Or with rsync:
|
|
||||||
mkdir download
|
|
||||||
rsync -ultav rsync://ml.spacecruft.org/download/ download/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
# TODO / Brainstorms
|
To download all the SatNOGS collections `.torrent` files from the
|
||||||
This is a first draft of how to do this. The actual machine learning
|
Internet Archive, run:
|
||||||
process hasn't been looked at at all, except to get it to generate
|
|
||||||
an answer. It has a long ways to go. There are also many ways to do
|
|
||||||
this besides using Tensorflow and Keras. Originally, I considered
|
|
||||||
using OpenCV. Ideas in no particular order below.
|
|
||||||
|
|
||||||
## General
|
```
|
||||||
General considerations.
|
wut-ia-torrents
|
||||||
|
```
|
||||||
* Use Open CV.
|
|
||||||
|
|
||||||
* Use something other than Tensorflow / Keras.
|
|
||||||
|
|
||||||
* Do mirror of `network.satnogs.org` and do API calls to it for data.
|
|
||||||
|
|
||||||
* Issues are now available here:
|
|
||||||
* https://spacecruft.org/spacecruft/satnogs-wut/issues
|
|
||||||
|
|
||||||
## Tensorflow / Keras
|
|
||||||
At present Tensorflow and Keras are used.
|
|
||||||
|
|
||||||
* Learn Keras / Tensorflow...
|
|
||||||
|
|
||||||
* What part of image is being evaluated?
|
|
||||||
|
|
||||||
* Re-evaluate each step.
|
|
||||||
|
|
||||||
* Right now the prediction output is just "good" or "bad", needs
|
|
||||||
"failed" too.
|
|
||||||
|
|
||||||
* Give confidence score in each prediction.
|
|
||||||
|
|
||||||
* Visualize what ML is looking at.
|
|
||||||
|
|
||||||
* Separate out good/bad/failed by satellite, transmitter, or encoding.
|
|
||||||
This way "good" isn't considering a "good" vetting to be a totally
|
|
||||||
different encoding. Right now, it is considering as good observations
|
|
||||||
that should be bad...
|
|
||||||
|
|
||||||
* If it has a low confidence, return "unknown" instead of "good" or "bad".
|
|
||||||
|
|
||||||
|
|
||||||
# Caveats
|
# Caveats
|
||||||
This is nearly the first machine learning script I've done,
|
This is the first artificial intelligence script I've done,
|
||||||
I know little about radio and less about satellites,
|
I know little about radio and less about satellites,
|
||||||
and I'm not a programmer.
|
and I'm not a programmer.
|
||||||
|
|
||||||
|
@ -361,4 +281,4 @@ Main repository is available here:
|
||||||
|
|
||||||
License: CC By SA 4.0 International and/or GPLv3+ at your discretion. Other code licensed under their own respective licenses.
|
License: CC By SA 4.0 International and/or GPLv3+ at your discretion. Other code licensed under their own respective licenses.
|
||||||
|
|
||||||
Copyright (C) 2019, 2020, Jeff Moe
|
Copyright (C) 2019, 2020, 2022 Jeff Moe
|
||||||
|
|
|
@ -0,0 +1,49 @@
|
||||||
|
# Distributed Computing
|
||||||
|
HOWTO Set up and run Tensorflow on multiple nodes.
|
||||||
|
This document is specific to this particular configuration.
|
||||||
|
|
||||||
|
# Software
|
||||||
|
Main software in use:
|
||||||
|
|
||||||
|
* Debian
|
||||||
|
* Proxmox
|
||||||
|
* Ceph
|
||||||
|
* Python 3
|
||||||
|
* Tensorflow
|
||||||
|
* Jupyter
|
||||||
|
* `clusterssh`
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
Major steps.
|
||||||
|
|
||||||
|
1. Install Proxmox on bare metal.
|
||||||
|
1. Clone Debian KVM Nodes.
|
||||||
|
1. Set up nodes.
|
||||||
|
1. Install Tensorflow.
|
||||||
|
1. Set up Ceph.
|
||||||
|
|
||||||
|
## Proxmox
|
||||||
|
Setting up Proxmox is outside the scope of this document.
|
||||||
|
All you really need is some virtual machines, however
|
||||||
|
they are created.
|
||||||
|
|
||||||
|
* https://www.proxmox.com/en/proxmox-ve
|
||||||
|
|
||||||
|
## Set up nodes
|
||||||
|
```
|
||||||
|
# On main workstation or node where you built tensorflow:
|
||||||
|
NODES="ml1 ml2 ml3 ml4 ml5"
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
# On worker nodes:
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install python3-pip sshfs jq
|
||||||
|
pip3 install --upgrade --user pip
|
||||||
|
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
||||||
|
pip3 install --upgrade --user -r requirements-node.txt
|
||||||
|
|
||||||
|
# If you have cloned the tensorflow repo, test with:
|
||||||
|
#python3 ~/devel/tensorflow/tensorflow/tensorflow/python/distribute/multi_worker_continuous_run_test.py
|
||||||
|
```
|
||||||
|
|
|
@ -0,0 +1,202 @@
|
||||||
|
# PyTorch
|
||||||
|
PyTorch is an alternative to TensorFlow.
|
||||||
|
|
||||||
|
If using a KVM, be sure CPU type is set to `host`.
|
||||||
|
|
||||||
|
## Get Source
|
||||||
|
Get PyTorch source code:
|
||||||
|
|
||||||
|
```
|
||||||
|
# This is about 1 gig:
|
||||||
|
git clone --recursive https://github.com/pytorch/pytorch
|
||||||
|
```
|
||||||
|
|
||||||
|
The recursive git repos contain a mix of permissive licenses, mostly
|
||||||
|
BSD, MIT, Apache style. No GPL. License owners are mostly Google
|
||||||
|
and Facebook, with a mix of many others.
|
||||||
|
|
||||||
|
## Build Py
|
||||||
|
Build from scratch with free software options.
|
||||||
|
PyTorch has a `CMakeLists.txt`, so let's see how it goes with a `cmake` build....
|
||||||
|
|
||||||
|
Install build dependencies:
|
||||||
|
```
|
||||||
|
apt install cmake cmake-curses-gui g++ python-yaml python-typing
|
||||||
|
# Note, it uses python-yaml, not python3-yaml...
|
||||||
|
|
||||||
|
# Optional deps:
|
||||||
|
apt install doxygen
|
||||||
|
apt install libfftw3-dev
|
||||||
|
apt install libgmp3-dev
|
||||||
|
apt install libmpfr-dev
|
||||||
|
apt install libmkldnn-dev
|
||||||
|
apt install libnuma-dev # Nope, not right one
|
||||||
|
# Optional deps for BLAS OpenBLAS
|
||||||
|
apt install libopenblas-dev
|
||||||
|
# OpenCV
|
||||||
|
apt install libopencv-dev
|
||||||
|
# pybind?
|
||||||
|
apt install pybind11-dev
|
||||||
|
pybind11_INCLUDE_DIR /usr/include/pybind11
|
||||||
|
|
||||||
|
# BLAS Eigen
|
||||||
|
apt install libeigen3-dev # fail?
|
||||||
|
# ffmpeg
|
||||||
|
apt install libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavresample-dev libavutil-dev libpostproc-dev libswresample-dev libswscale-dev ffmpeg
|
||||||
|
# Thusly:
|
||||||
|
USE_FFMPEG ON
|
||||||
|
FFMPEG_AVCODEC_INCLUDE_DIR /usr/include/x86_64-linux-gnu/libavcodec
|
||||||
|
FFMPEG_LIBAVCODEC /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBAVFORMAT /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBAVUTIL /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBSWSCALE /usr/lib/x86_64-linux-gnu
|
||||||
|
# or?
|
||||||
|
FFMPEG_AVCODEC_INCLUDE_DIR /usr/include/x86_64-linux-gnu/libavcodec
|
||||||
|
FFMPEG_LIBAVCODEC /usr/lib/x86_64-linux-gnu/libavcodec.so
|
||||||
|
FFMPEG_LIBAVFORMAT /usr/lib/x86_64-linux-gnu/libavformat.so
|
||||||
|
FFMPEG_LIBAVUTIL /usr/lib/x86_64-linux-gnu/libavutil.so
|
||||||
|
FFMPEG_LIBSWSCALE /usr/lib/x86_64-linux-gnu/libswscale.so
|
||||||
|
|
||||||
|
|
||||||
|
# XXX
|
||||||
|
TORCH_BUILD_VERSION default is 1.1.0
|
||||||
|
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake ..
|
||||||
|
ccmake ..
|
||||||
|
# build takes ~30 minutes
|
||||||
|
make -j8
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
#cmake note
|
||||||
|
Generated cmake files are only fully tested if one builds with system glog,
|
||||||
|
gflags, and protobuf. Other settings may generate files that are not well
|
||||||
|
tested.
|
||||||
|
|
||||||
|
# so maybe:
|
||||||
|
apt install libgflags-dev
|
||||||
|
apt install libprotobuf-dev
|
||||||
|
# glog?
|
||||||
|
apt install libgoogle-glog-dev # ???
|
||||||
|
```
|
||||||
|
|
||||||
|
Or:
|
||||||
|
|
||||||
|
```
|
||||||
|
python setup.py install
|
||||||
|
```
|
||||||
|
|
||||||
|
How docs say:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
||||||
|
python setup.py build --cmake-only
|
||||||
|
ccmake build # or cmake-gui build
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Notable build options:
|
||||||
|
|
||||||
|
```
|
||||||
|
BLAS --- has multiple options:
|
||||||
|
MKL (non-free bits possible)
|
||||||
|
vecLib (?)
|
||||||
|
FLAME (?)
|
||||||
|
Eigen (in Debian)
|
||||||
|
ATLAS (in Debian?)
|
||||||
|
OpenBLAS (in Debian)
|
||||||
|
```
|
||||||
|
|
||||||
|
More options:
|
||||||
|
|
||||||
|
```
|
||||||
|
BUILD_BINARY ON
|
||||||
|
BUILD_PYTHON ON
|
||||||
|
OPENMP_FOUND ON
|
||||||
|
USE_CUDA OFF
|
||||||
|
USE_CUDNN OFF
|
||||||
|
USE_DISTRIBUTED ON
|
||||||
|
USE_FFMPEG ON
|
||||||
|
USE_MKLDNN ON ?? with Debian's packages ?
|
||||||
|
USE_MKLDNN_CBLAS NO ?
|
||||||
|
USE_LEVELDB ?? Available in debian.
|
||||||
|
USE_LMDB ?? Available in Debian.
|
||||||
|
USE_MPI ON heh
|
||||||
|
USE_NCCL OFF nvidia
|
||||||
|
USE_NUMA ON ?
|
||||||
|
USE_NUMPY ON
|
||||||
|
USE_OBSERVERS ON ?
|
||||||
|
USE_OPENCL ON ?
|
||||||
|
USE_OPENCV ON ?
|
||||||
|
USE_OPENMP ON ?
|
||||||
|
USE_REDIS ?
|
||||||
|
USE_ROCKSDB Available in Debian, like leveldb and lmdb
|
||||||
|
USE_ZMQ Available in Debian, messaging.
|
||||||
|
USE_ZSTD Available in Debian, compression.
|
||||||
|
WITH_BLAS ?
|
||||||
|
WITH_OPENMP ON
|
||||||
|
CAFFE2_USE_MSVC_STATIC_RUNTIME OFF ??
|
||||||
|
BUILD_CAFFE2_MOBILE OFF
|
||||||
|
```
|
||||||
|
|
||||||
|
# More misc...
|
||||||
|
```
|
||||||
|
git checkout v1.4.0
|
||||||
|
apt install python3-dev python3-numpy python-numpy
|
||||||
|
# uh
|
||||||
|
apt install libcaffe-cpu-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
Seems not to be using python3??
|
||||||
|
|
||||||
|
|
||||||
|
# Misc
|
||||||
|
```
|
||||||
|
[E init_intrinsics_check.cc:43] CPU feature avx is present on your machine, but the Caffe2 binary is not compiled with it. It means you may not get the full speed of your CPU.
|
||||||
|
```
|
||||||
|
|
||||||
|
# Build PyTorch `pip`
|
||||||
|
|
||||||
|
Lets get old Python 2 out of here:
|
||||||
|
|
||||||
|
```
|
||||||
|
apt autoremove --purge python2 python2-minimal python2.7 python2.7-minimal libpython2.7 libpython2.7-minimal libpython2.7-stdlib
|
||||||
|
git checkout v1.4.0
|
||||||
|
apt install python3-pip python3-setuptools
|
||||||
|
# Docs recommend this, but mkl is proprietary:
|
||||||
|
pip3 install --user numpy ninja pyyaml mkl mkl-include setuptools cmake cffi
|
||||||
|
# Try:
|
||||||
|
pip3 install --user --upgrade pip
|
||||||
|
# Set that new pip in your path, ~/.local/bin/ in ~/.bashrc:
|
||||||
|
export PATH="~/.local/bin:/usr/lib/ccache:$PATH"
|
||||||
|
|
||||||
|
pip3 install --user cmake
|
||||||
|
pip3 install --user numpy ninja pyyaml setuptools cmake cffi
|
||||||
|
# From upstream docs:
|
||||||
|
pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||||
|
```
|
||||||
|
|
||||||
|
# Build with Python Setup
|
||||||
|
|
||||||
|
```
|
||||||
|
git submodule sync
|
||||||
|
git submodule update --init --recursive
|
||||||
|
python3 setup.py build --cmake-only
|
||||||
|
ccmake build
|
||||||
|
python3 setup.py install --user
|
||||||
|
```
|
||||||
|
|
||||||
|
# Proprietary Bits
|
||||||
|
Parts to avoid in the PyTorch ecosystem:
|
||||||
|
|
||||||
|
* Anaconda is a package manager for Python. The Anaconda repository contains non-free software,
|
||||||
|
so don't use it.
|
||||||
|
|
||||||
|
* "MKL" (Intel's non-free binaries).
|
||||||
|
|
||||||
|
* Note, these MKL packages are in Debian: `libmkldnn0 libmkldnn-dev libmkldnn-doc`
|
||||||
|
|
||||||
|
* Intel compiler.
|
||||||
|
|
|
@ -0,0 +1,134 @@
|
||||||
|
# TensorFlow Serving
|
||||||
|
HOWTO Set up and run TensorFlow Serving.
|
||||||
|
This document is specific to this particular configuration.
|
||||||
|
|
||||||
|
# Software
|
||||||
|
Main software in use:
|
||||||
|
|
||||||
|
* Debian
|
||||||
|
* Proxmox
|
||||||
|
* Ceph
|
||||||
|
* Python 3
|
||||||
|
* TensorFlow Serving
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
Install TensorFlow Serving. The recommended way is using `docker`, but
|
||||||
|
here we build from source.
|
||||||
|
|
||||||
|
* https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/setup.md
|
||||||
|
|
||||||
|
* https://github.com/tensorflow/serving/blob/master/tensorflow_serving/tools/docker/Dockerfile.devel
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Install Bazel
|
||||||
|
Instead of building Bazel, download the version that TensorFlow Serving builds with,
|
||||||
|
currently version `1.2.1` as shown in the docker builder.
|
||||||
|
|
||||||
|
Install Bazel dependencies:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Note, this installs python 2.7....
|
||||||
|
apt install g++ zlib1g-dev unzip python
|
||||||
|
```
|
||||||
|
|
||||||
|
Install bazel .deb from releases here.
|
||||||
|
* firefox https://github.com/bazelbuild/bazel/releases
|
||||||
|
|
||||||
|
Note: get the version Serving wants, not the latest release.
|
||||||
|
|
||||||
|
```
|
||||||
|
wget https://github.com/bazelbuild/bazel/releases/download/1.2.1/bazel_1.2.1-linux-x86_64.deb
|
||||||
|
wget https://github.com/bazelbuild/bazel/releases/download/1.2.1/bazel_1.2.1-linux-x86_64.deb.sha256
|
||||||
|
dpkg -i bazel_1.2.1-linux-x86_64.deb
|
||||||
|
apt -f install
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install Dependencies
|
||||||
|
Dependencies. Note, there are likely fewer dependencies than listed
|
||||||
|
in the docs, since `bazel` is installed from `.deb`, not built.
|
||||||
|
|
||||||
|
```
|
||||||
|
apt update
|
||||||
|
apt install --no-install-recommends \
|
||||||
|
automake \
|
||||||
|
build-essential \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
git \
|
||||||
|
libcurl3-dev \
|
||||||
|
libfreetype6-dev \
|
||||||
|
libpng-dev \
|
||||||
|
libtool \
|
||||||
|
libzmq3-dev \
|
||||||
|
mlocate \
|
||||||
|
pkg-config \
|
||||||
|
python-dev \
|
||||||
|
software-properties-common \
|
||||||
|
swig \
|
||||||
|
unzip \
|
||||||
|
wget \
|
||||||
|
zip \
|
||||||
|
zlib1g-dev \
|
||||||
|
python3-distutils
|
||||||
|
```
|
||||||
|
|
||||||
|
Not installed:
|
||||||
|
```
|
||||||
|
openjdk-8-jdk \
|
||||||
|
openjdk-8-jre-headless \
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Compile TensorFlow Serving
|
||||||
|
HOWTO compile TensorFlow Serving:
|
||||||
|
|
||||||
|
```
|
||||||
|
git clone https://github.com/tensorflow/serving
|
||||||
|
cd serving
|
||||||
|
|
||||||
|
git checkout 2.1.0
|
||||||
|
|
||||||
|
bazel build --color=yes --curses=yes \
|
||||||
|
${TF_SERVING_BAZEL_OPTIONS} \
|
||||||
|
--verbose_failures \
|
||||||
|
--output_filter=DONT_MATCH_ANYTHING \
|
||||||
|
${TF_SERVING_BUILD_OPTIONS} \
|
||||||
|
tensorflow_serving/model_servers:tensorflow_model_server
|
||||||
|
```
|
||||||
|
|
||||||
|
Build `pip` package:
|
||||||
|
|
||||||
|
```
|
||||||
|
bazel build --color=yes --curses=yes \
|
||||||
|
${TF_SERVING_BAZEL_OPTIONS} \
|
||||||
|
--verbose_failures \
|
||||||
|
--output_filter=DONT_MATCH_ANYTHING \
|
||||||
|
${TF_SERVING_BUILD_OPTIONS} \
|
||||||
|
tensorflow_serving/tools/pip_package:build_pip_package
|
||||||
|
|
||||||
|
|
||||||
|
bazel-bin/tensorflow_serving/tools/pip_package/build_pip_package \
|
||||||
|
/tmp/pip
|
||||||
|
```
|
||||||
|
|
||||||
|
# Install TensorFlow Server with `pip`
|
||||||
|
Install with `pip`, don't use bazel, or build, etc.
|
||||||
|
|
||||||
|
```
|
||||||
|
# Set PATH, add to ~/.bashrc
|
||||||
|
export PATH="~/.local/bin:/usr/lib/ccache:$PATH"
|
||||||
|
. ~/.bashrc
|
||||||
|
sudo apt install python3-pip
|
||||||
|
pip3 install --upgrade --user pip
|
||||||
|
# Should return the one in ~/.local:
|
||||||
|
which pip3
|
||||||
|
# Install it, needs ~3 gigs free in /tmp
|
||||||
|
pip3 install --user tensorflow-serving-api
|
||||||
|
```
|
||||||
|
|
||||||
|
# Misc
|
||||||
|
See also:
|
||||||
|
|
||||||
|
* https://github.com/tobegit3hub/simple_tensorflow_serving
|
||||||
|
|
|
@ -0,0 +1,57 @@
|
||||||
|
# Voila
|
||||||
|
|
||||||
|
Voila is a way to turn Jupyter notebooks into web applications.
|
||||||
|
|
||||||
|
# Install
|
||||||
|
Start with basic Debian Buster install.
|
||||||
|
|
||||||
|
```
|
||||||
|
# set up partitions
|
||||||
|
# XXX deps...
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install apache2 python3-certbot-apache python3-pip sshfs npm nodejs
|
||||||
|
certbot
|
||||||
|
systemctl restart apache2
|
||||||
|
adduser wut
|
||||||
|
sudo su - wut
|
||||||
|
pip3 install --user --upgrade pip
|
||||||
|
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
||||||
|
echo 'PATH=~/.local/bin:$PATH' >> ~/.bashrc
|
||||||
|
```
|
||||||
|
|
||||||
|
logout #log back in as user wut
|
||||||
|
sudo su - wut
|
||||||
|
# Install Python packages for Voila
|
||||||
|
pip3 install --user --upgrade -r requirements-voila.txt
|
||||||
|
# Enable Jupyter extensions
|
||||||
|
jupyter nbextension enable --py widgetsnbextension
|
||||||
|
#jupyter labextension install @jupyter-widgets/jupyterlab-manager
|
||||||
|
#jupyter serverextension enable --py jupyterlab --user
|
||||||
|
```
|
||||||
|
|
||||||
|
* Set up hosts file, network, etc.
|
||||||
|
|
||||||
|
* Set up apache proxy
|
||||||
|
|
||||||
|
```
|
||||||
|
# Cruft to start voila:
|
||||||
|
cd /srv/satnogs/satnogs-wut/notebooks/
|
||||||
|
|
||||||
|
voila \
|
||||||
|
--ExecutePreprocessor.timeout=600 \
|
||||||
|
--no-browser \
|
||||||
|
--port=8867 \
|
||||||
|
--autoreload=True \
|
||||||
|
--Voila.ip=localhost \
|
||||||
|
--VoilaConfiguration.enable_nbextensions=False \
|
||||||
|
--theme=dark \
|
||||||
|
wut-web.ipynb \
|
||||||
|
1>>~/log/voila.log 2>>~/log/voila.err &
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
# wut?
|
||||||
|
Site:
|
||||||
|
|
||||||
|
* https://spacecruft.org/spacecruft/satnogs-wut/
|
||||||
|
|
Binary file not shown.
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut-alpha
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8871 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web-alpha.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut-beta
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8873 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web-beta.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8867 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -10,24 +10,10 @@
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
||||||
"# Reads wut.h5 and tests files in data/test/unvetted/"
|
"# Reads wut.h5 and tests files in data/test/unvetted/\n",
|
||||||
]
|
"#\n",
|
||||||
},
|
"# GPLv3+\n",
|
||||||
{
|
"#\n",
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# GPLv3+"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
"# Built using Jupyter, Tensorflow, Keras"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -37,24 +23,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Start\")"
|
"import os\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import numpy as np"
|
"import numpy as np"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -64,7 +33,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -73,24 +42,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"import tensorflow as tf\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow import keras\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras import layers\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras import optimizers\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
]
|
"from tensorflow.keras.models import load_model\n",
|
||||||
},
|
"from tensorflow.keras.models import Model\n",
|
||||||
{
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
"cell_type": "code",
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
"execution_count": null,
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
"metadata": {},
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
"outputs": [],
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
"source": [
|
"from tensorflow.keras.utils import plot_model"
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -103,39 +71,15 @@
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"from sklearn.decomposition import PCA"
|
"from sklearn.decomposition import PCA\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Seaborn pip dependency\n",
|
"# Seaborn pip dependency\n",
|
||||||
"import seaborn as sns"
|
"import seaborn as sns\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Interact\n",
|
|
||||||
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
|
|
||||||
"from __future__ import print_function\n",
|
"from __future__ import print_function\n",
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
"import ipywidgets as widgets"
|
"import ipywidgets as widgets\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Display Images\n",
|
|
||||||
"from IPython.display import display, Image"
|
"from IPython.display import display, Image"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -145,7 +89,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Python import done\")"
|
"#ENCODING='APT'\n",
|
||||||
|
"#ENCODING='CW'\n",
|
||||||
|
"#ENCODING='FM'\n",
|
||||||
|
"#ENCODING='FSK9k6'\n",
|
||||||
|
"ENCODING='GMSK2k4'\n",
|
||||||
|
"#ENCODING='GMSK4k8'\n",
|
||||||
|
"#ENCODING='USB'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -154,7 +104,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Load HDF file\")"
|
"h5_file=(\"wut-\" + ENCODING + \".h5\")\n",
|
||||||
|
"model_path_h5 = os.path.join('/srv/satnogs/data/models/', ENCODING, h5_file)\n",
|
||||||
|
"print(model_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -163,7 +115,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model = load_model('data/models/wut-DUV.tf')"
|
"model = load_model(model_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -172,16 +124,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"test_dir = os.path.join('/srv/satnogs/data/', 'test')\n",
|
||||||
]
|
"num_test = len(os.listdir(test_dir))\n",
|
||||||
},
|
"print(\"Will test\", num_test, \"waterfall PNG files under this directory:\\n\", test_dir)"
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_test = len(os.listdir(test_dir))"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -191,11 +136,11 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Good results\n",
|
"# Good results\n",
|
||||||
"#batch_size = 128\n",
|
"batch_size = 128\n",
|
||||||
"#epochs = 6\n",
|
"epochs = 6\n",
|
||||||
"# Testing, faster more inaccurate results\n",
|
"# Testing, faster more inaccurate results\n",
|
||||||
"batch_size = 32\n",
|
"#batch_size = 32\n",
|
||||||
"epochs = 3"
|
"#epochs = 3"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -223,15 +168,6 @@
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(test_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
|
@ -247,7 +183,9 @@
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"sample_test_images, _ = next(test_data_gen)"
|
"sample_test_images, _ = next(test_data_gen)"
|
||||||
|
@ -259,7 +197,15 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
"print(\"Number of observations to test:\", num_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"def plotImages(images_arr):\n",
|
"def plotImages(images_arr):\n",
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
||||||
" axes = axes.flatten()\n",
|
" axes = axes.flatten()\n",
|
||||||
|
@ -276,28 +222,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"plotImages(sample_test_images[0:1])"
|
"plotImages(sample_test_images[0:2])"
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# https://keras.io/models/sequential/\n",
|
|
||||||
"print(\"predict\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#pred=model.predict_generator(test_data_gen,\n",
|
|
||||||
"#steps=1,\n",
|
|
||||||
"#verbose=1)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -309,8 +234,7 @@
|
||||||
"prediction = model.predict(\n",
|
"prediction = model.predict(\n",
|
||||||
" x=test_data_gen,\n",
|
" x=test_data_gen,\n",
|
||||||
" verbose=1\n",
|
" verbose=1\n",
|
||||||
")\n",
|
")"
|
||||||
"print(\"end predict\")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -328,7 +252,6 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Show prediction score\n",
|
|
||||||
"print(prediction)"
|
"print(prediction)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -372,14 +295,12 @@
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": []
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -393,7 +314,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -1,542 +0,0 @@
|
||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# wut-train-cluster --- What U Think? SatNOGS Observation AI, training application cluster edition.\n",
|
|
||||||
"#\n",
|
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
|
||||||
"#\n",
|
|
||||||
"# Based on data/train and data/val directories builds a wut.tf file.\n",
|
|
||||||
"# GPLv3+\n",
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from __future__ import absolute_import, division, print_function, unicode_literals\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"import os\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import simplejson as json\n",
|
|
||||||
"import datetime"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import tensorflow.python.keras\n",
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
|
||||||
"from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#%matplotlib inline\n",
|
|
||||||
"#import matplotlib.pyplot as plt\n",
|
|
||||||
"#import seaborn as sns\n",
|
|
||||||
"#from sklearn.decomposition import PCA\n",
|
|
||||||
"#from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
|
||||||
"#import ipywidgets as widgets\n",
|
|
||||||
"#from IPython.display import display, Image"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print('tf {}'.format(tf.__version__))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"os.environ[\"TF_CONFIG\"] = json.dumps({\n",
|
|
||||||
" \"cluster\": {\n",
|
|
||||||
" \"worker\": [ \"ml0-int:2222\", \"ml1-int:2222\", \"ml2-int:2222\", \"ml3-int:2222\", \"ml4-int:2222\", \"ml5-int:2222\" ]\n",
|
|
||||||
" },\n",
|
|
||||||
" \"task\": {\"type\": \"worker\", \"index\": 0 },\n",
|
|
||||||
" \"num_workers\": 6\n",
|
|
||||||
"})"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"batch_size = 32\n",
|
|
||||||
"epochs = 1\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606\n",
|
|
||||||
"# Good results\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 6"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"tf.keras.backend.clear_session()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"options = tf.data.Options()\n",
|
|
||||||
"#options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF\n",
|
|
||||||
"options.experimental_distribute.auto_shard_policy = AutoShardPolicy.DATA\n",
|
|
||||||
"# XXX\n",
|
|
||||||
"#dataset = dataset.with_options(options)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n",
|
|
||||||
" tf.distribute.experimental.CollectiveCommunication.RING)\n",
|
|
||||||
"\n",
|
|
||||||
"#mirrored_strategy = tf.distribute.MirroredStrategy(\n",
|
|
||||||
"# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"root_data_dir = ('/srv/satnogs')\n",
|
|
||||||
"train_dir = os.path.join(root_data_dir, 'data/', 'train')\n",
|
|
||||||
"val_dir = os.path.join(root_data_dir,'data/', 'val')\n",
|
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
|
||||||
"total_val = num_val_good + num_val_bad"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print('total training good images:', num_train_good)\n",
|
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total training images:\", total_train)\n",
|
|
||||||
"print('total validation good images:', num_val_good)\n",
|
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
|
||||||
"total_train = 100\n",
|
|
||||||
"total_val = 100\n",
|
|
||||||
"print(\"Reduced training images:\", total_train)\n",
|
|
||||||
"print(\"Reduced validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"#train_data_gen = train_image_generator.flow_from_directory(batch_size=GLOBAL_BATCH_SIZE,\n",
|
|
||||||
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=train_dir,\n",
|
|
||||||
" shuffle=True,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')\n",
|
|
||||||
"#val_data_gen = val_image_generator.flow_from_directory(batch_size=GLOBAL_BATCH_SIZE,\n",
|
|
||||||
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=val_dir,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#train_dist_dataset = strategy.experimental_distribute_dataset()\n",
|
|
||||||
"#val_dist_dataset = strategy.experimental_distribute_dataset()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#sample_train_images, _ = next(train_data_gen)\n",
|
|
||||||
"#sample_val_images, _ = next(val_data_gen)\n",
|
|
||||||
"## This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"#def plotImages(images_arr):\n",
|
|
||||||
"# fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
|
||||||
"# axes = axes.flatten()\n",
|
|
||||||
"# for img, ax in zip( images_arr, axes):\n",
|
|
||||||
"# ax.imshow(img)\n",
|
|
||||||
"# ax.axis('off')\n",
|
|
||||||
"# plt.tight_layout()\n",
|
|
||||||
"# plt.show()\n",
|
|
||||||
"# \n",
|
|
||||||
"#plotImages(sample_train_images[0:3])\n",
|
|
||||||
"#plotImages(sample_val_images[0:3])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#%load_ext tensorboard\n",
|
|
||||||
"#!rm -rf ./clusterlogs/\n",
|
|
||||||
"#log_dir=\"clusterlogs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
|
||||||
"#log_dir=\"clusterlogs\"\n",
|
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
|
|
||||||
"#%tensorboard --logdir clusterlogs --port 6006"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"strategy.num_replicas_in_sync"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"## Compute global batch size using number of replicas.\n",
|
|
||||||
"#GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS\n",
|
|
||||||
"BATCH_SIZE_PER_REPLICA = 8\n",
|
|
||||||
"print(\"BATCH_SIZE_PER_REPLICA\", BATCH_SIZE_PER_REPLICA)\n",
|
|
||||||
"print(\"strategy.num_replicas_in_sync\", strategy.num_replicas_in_sync)\n",
|
|
||||||
"global_batch_size = (BATCH_SIZE_PER_REPLICA *\n",
|
|
||||||
" strategy.num_replicas_in_sync)\n",
|
|
||||||
"print(\"global_batch_size\", global_batch_size)\n",
|
|
||||||
"print(\"total_train\", total_train)\n",
|
|
||||||
"print(\"total_val \", total_val)\n",
|
|
||||||
"print(\"batch_size\", batch_size)\n",
|
|
||||||
"print(\"total_train // batch_size\", total_train // batch_size)\n",
|
|
||||||
"print(\"total_val // batch_size\", total_val // batch_size)\n",
|
|
||||||
"#.batch(global_batch_size)\n",
|
|
||||||
"#dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)\n",
|
|
||||||
"#dataset = dataset.batch(global_batch_size)\n",
|
|
||||||
"#LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_uncompiled_model():\n",
|
|
||||||
" model = Sequential([\n",
|
|
||||||
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(32, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(64, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Flatten(),\n",
|
|
||||||
" Dense(512, activation='relu'),\n",
|
|
||||||
" Dense(1, activation='sigmoid')\n",
|
|
||||||
" ])\n",
|
|
||||||
" return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#get_uncompiled_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_compiled_model():\n",
|
|
||||||
" model = get_uncompiled_model()\n",
|
|
||||||
" model.compile(optimizer='adam',\n",
|
|
||||||
" loss='binary_crossentropy',\n",
|
|
||||||
" metrics=['accuracy'])\n",
|
|
||||||
" return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Create a checkpoint directory to store the checkpoints.\n",
|
|
||||||
"#checkpoint_dir = './training_checkpoints'\n",
|
|
||||||
"#checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='tmp/keras-ckpt')]\n",
|
|
||||||
"#callbacks=[tensorboard_callback,callbacks]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#def get_fit_model():\n",
|
|
||||||
"# model = get_compiled_model()\n",
|
|
||||||
"# model.fit(\n",
|
|
||||||
"# train_data_gen,\n",
|
|
||||||
"# steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
"# epochs=epochs,\n",
|
|
||||||
"# validation_data=val_data_gen,\n",
|
|
||||||
"# validation_steps=total_val // batch_size,\n",
|
|
||||||
"# verbose=2\n",
|
|
||||||
"# )\n",
|
|
||||||
"#return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"with strategy.scope():\n",
|
|
||||||
" model = get_compiled_model()\n",
|
|
||||||
" history = model.fit(\n",
|
|
||||||
" train_data_gen,\n",
|
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
" epochs=epochs,\n",
|
|
||||||
" validation_data=val_data_gen,\n",
|
|
||||||
" validation_steps=total_val // batch_size,\n",
|
|
||||||
" verbose=2\n",
|
|
||||||
" ).batch(global_batch_size)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#model.summary()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"TRAINING info\")\n",
|
|
||||||
"print(train_dir)\n",
|
|
||||||
"print(train_good_dir)\n",
|
|
||||||
"print(train_bad_dir)\n",
|
|
||||||
"print(train_image_generator)\n",
|
|
||||||
"print(train_data_gen)\n",
|
|
||||||
"#print(sample_train_images)\n",
|
|
||||||
"#print(history)\n",
|
|
||||||
"#model.to_json()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#history = model.fit(X, y, batch_size=32, epochs=40, validation_split=0.1)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/FOO/wut-train-cluster2.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/FOO/wut-train-cluster2.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/FOO/wut-weights-train-cluster2.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/FOO/wut-weights-train-cluster2.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"acc = history.history['accuracy']\n",
|
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
|
||||||
"loss = history.history['loss']\n",
|
|
||||||
"val_loss = history.history['val_loss']\n",
|
|
||||||
"epochs_range = range(epochs)\n",
|
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
|
||||||
"plt.subplot(1, 2, 1)\n",
|
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
|
||||||
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
|
|
||||||
"plt.legend(loc='lower right')\n",
|
|
||||||
"plt.title('Training and Validation Accuracy')\n",
|
|
||||||
"plt.subplot(1, 2, 2)\n",
|
|
||||||
"plt.plot(epochs_range, loss, label='Training Loss')\n",
|
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
|
||||||
"plt.legend(loc='upper right')\n",
|
|
||||||
"plt.title('Training and Validation Loss')\n",
|
|
||||||
"plt.show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
|
@ -1,410 +0,0 @@
|
||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# wut-train-cluster --- What U Think? SatNOGS Observation AI, training application cluster edition.\n",
|
|
||||||
"#\n",
|
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
|
||||||
"#\n",
|
|
||||||
"# Based on data/train and data/val directories builds a wut.tf file.\n",
|
|
||||||
"# GPLv3+\n",
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from __future__ import absolute_import, division, print_function, unicode_literals\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"import os\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import simplejson as json\n",
|
|
||||||
"import datetime"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import tensorflow.python.keras\n",
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%matplotlib inline\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"import seaborn as sns\n",
|
|
||||||
"from sklearn.decomposition import PCA\n",
|
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
|
||||||
"import ipywidgets as widgets\n",
|
|
||||||
"from IPython.display import display, Image"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"os.environ[\"TF_CONFIG\"] = json.dumps({\n",
|
|
||||||
" \"cluster\": {\n",
|
|
||||||
" \"worker\": [ \"ml1:2222\", \"ml2:2222\", \"ml3:2222\", \"ml4:2222\", \"ml5:2222\" ]\n",
|
|
||||||
" },\n",
|
|
||||||
" \"task\": {\"type\": \"worker\", \"index\": 1 },\n",
|
|
||||||
" \"num_workers\": 5\n",
|
|
||||||
"})"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"batch_size = 128\n",
|
|
||||||
"epochs = 32\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606\n",
|
|
||||||
"# Good results\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 6"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#from tensorflow.python.framework.ops import disable_eager_execution\n",
|
|
||||||
"#disable_eager_execution()\n",
|
|
||||||
"# MultiWorkerMirroredStrategy needs TF_CONFIG\n",
|
|
||||||
"#multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n",
|
|
||||||
"# Central Storage Strategy\n",
|
|
||||||
"#central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()\n",
|
|
||||||
"# ParameterServerStrategy needs TF_CONFIG\n",
|
|
||||||
"#ps_strategy = tf.distribute.experimental.ParameterServerStrategy()\n",
|
|
||||||
"# OneDeviceStrategy No cluster\n",
|
|
||||||
"#strategy = tf.distribute.OneDeviceStrategy(device=\"/CPU:0\")\n",
|
|
||||||
"# Mirrored Strategy\n",
|
|
||||||
"mirrored_strategy = tf.distribute.MirroredStrategy()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
|
||||||
"total_val = num_val_good + num_val_bad"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print('total training good images:', num_train_good)\n",
|
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total training images:\", total_train)\n",
|
|
||||||
"print('total validation good images:', num_val_good)\n",
|
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
|
||||||
"total_train = 16\n",
|
|
||||||
"total_val = 16\n",
|
|
||||||
"print(\"Reduced training images:\", total_train)\n",
|
|
||||||
"print(\"Reduced validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=train_dir,\n",
|
|
||||||
" shuffle=True,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')\n",
|
|
||||||
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=val_dir,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"sample_train_images, _ = next(train_data_gen)\n",
|
|
||||||
"sample_val_images, _ = next(val_data_gen)\n",
|
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"def plotImages(images_arr):\n",
|
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
|
||||||
" axes = axes.flatten()\n",
|
|
||||||
" for img, ax in zip( images_arr, axes):\n",
|
|
||||||
" ax.imshow(img)\n",
|
|
||||||
" ax.axis('off')\n",
|
|
||||||
" plt.tight_layout()\n",
|
|
||||||
" plt.show()\n",
|
|
||||||
" \n",
|
|
||||||
"plotImages(sample_train_images[0:3])\n",
|
|
||||||
"plotImages(sample_val_images[0:3])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%load_ext tensorboard\n",
|
|
||||||
"!rm -rf ./clusterlogs/\n",
|
|
||||||
"#log_dir=\"clusterlogs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
|
||||||
"log_dir=\"clusterlogs\"\n",
|
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
|
||||||
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
|
|
||||||
"%tensorboard --logdir clusterlogs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with multiworker_strategy.scope():\n",
|
|
||||||
"with mirrored_strategy.scope():\n",
|
|
||||||
" model = Sequential([\n",
|
|
||||||
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(32, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(64, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Flatten(),\n",
|
|
||||||
" Dense(512, activation='relu'),\n",
|
|
||||||
" Dense(1, activation='sigmoid')\n",
|
|
||||||
" ])\n",
|
|
||||||
" model.compile(optimizer='adam',\n",
|
|
||||||
" loss='binary_crossentropy',\n",
|
|
||||||
" metrics=['accuracy'])\n",
|
|
||||||
" history = model.fit_generator(\n",
|
|
||||||
" train_data_gen,\n",
|
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
" epochs=epochs,\n",
|
|
||||||
" validation_data=val_data_gen,\n",
|
|
||||||
" validation_steps=total_val // batch_size,\n",
|
|
||||||
" verbose=2,\n",
|
|
||||||
" callbacks=[tensorboard_callback]\n",
|
|
||||||
" )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#strategy.num_replicas_in_sync"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.summary()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"Image.LOAD_TRUNCATED_IMAGES = True"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"acc = history.history['accuracy']\n",
|
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
|
||||||
"loss = history.history['loss']\n",
|
|
||||||
"val_loss = history.history['val_loss']\n",
|
|
||||||
"epochs_range = range(epochs)\n",
|
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
|
||||||
"plt.subplot(1, 2, 1)\n",
|
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
|
||||||
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
|
|
||||||
"plt.legend(loc='lower right')\n",
|
|
||||||
"plt.title('Training and Validation Accuracy')\n",
|
|
||||||
"plt.subplot(1, 2, 2)\n",
|
|
||||||
"plt.plot(epochs_range, loss, label='Training Loss')\n",
|
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
|
||||||
"plt.legend(loc='upper right')\n",
|
|
||||||
"plt.title('Training and Validation Loss')\n",
|
|
||||||
"plt.show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"TRAINING info\")\n",
|
|
||||||
"print(train_dir)\n",
|
|
||||||
"print(train_good_dir)\n",
|
|
||||||
"print(train_bad_dir)\n",
|
|
||||||
"print(train_image_generator)\n",
|
|
||||||
"print(train_data_gen)\n",
|
|
||||||
"#print(sample_train_images)\n",
|
|
||||||
"print(history)\n",
|
|
||||||
"model.to_json()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Save .tf model data here"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/DUV/wut-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/DUV/wut-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/DUV/wut-weights-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/DUV/wut-weights-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
|
@ -3,22 +3,17 @@
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# wut-train --- What U Think? SatNOGS Observation AI, training application.\n",
|
"# wut-train --- What U Think? SatNOGS Observation AI, training application.\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# Based on data/train and data/val directories builds a wut.h5 file."
|
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
||||||
]
|
"#\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# GPLv3+"
|
"# GPLv3+"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -28,34 +23,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
"from __future__ import print_function\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"Start\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
"import os\n",
|
||||||
"import datetime"
|
"import datetime\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import numpy as np"
|
"import numpy as np"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -65,7 +35,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -74,15 +44,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"import tensorflow as tf\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow import keras\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras import layers\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras import optimizers\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.models import Model\n",
|
||||||
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
|
"from tensorflow.keras.utils import plot_model"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -91,53 +69,15 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Visualization\n",
|
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"from sklearn.decomposition import PCA"
|
"from sklearn.decomposition import PCA\n",
|
||||||
]
|
"import seaborn as sns\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Seaborn pip dependency\n",
|
|
||||||
"import seaborn as sns"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Interact\n",
|
|
||||||
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
"import ipywidgets as widgets"
|
"import ipywidgets as widgets\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.display import SVG"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -146,8 +86,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Display Images\n",
|
"#ENCODING='APT'\n",
|
||||||
"from IPython.display import display, Image"
|
"#ENCODING='BPSK1k2' # Fail\n",
|
||||||
|
"#ENCODING='FSK9k6'\n",
|
||||||
|
"#ENCODING='FM'\n",
|
||||||
|
"ENCODING='GMSK2k4'\n",
|
||||||
|
"#ENCODING='GMSK4k8'\n",
|
||||||
|
"#ENCODING='USB'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -156,18 +101,20 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Python import done\")"
|
"#batch_size = 8\n",
|
||||||
]
|
"#atch_size = 16\n",
|
||||||
},
|
"#atch_size = 32\n",
|
||||||
{
|
"batch_size = 64\n",
|
||||||
"cell_type": "code",
|
"#batch_size = 128\n",
|
||||||
"execution_count": null,
|
"#batch_size = 256\n",
|
||||||
"metadata": {},
|
"#epochs = 4\n",
|
||||||
"outputs": [],
|
"epochs = 8\n",
|
||||||
"source": [
|
"#IMG_WIDTH = 208\n",
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
"#IMG_HEIGHT = 402\n",
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
"IMG_WIDTH = 416\n",
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"IMG_HEIGHT = 803\n",
|
||||||
|
"#IMG_WIDTH = 823\n",
|
||||||
|
"#IMG_HEIGHT = 1603"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -176,55 +123,16 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'train')\n",
|
||||||
|
"val_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'val')\n",
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')"
|
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')"
|
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
"num_train_good = len(os.listdir(train_good_dir))\n",
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))"
|
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
"num_val_good = len(os.listdir(val_good_dir))\n",
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))"
|
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_test = len(os.listdir(test_dir))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
"total_train = num_train_good + num_train_bad\n",
|
||||||
"total_val = num_val_good + num_val_bad"
|
"total_val = num_val_good + num_val_bad"
|
||||||
]
|
]
|
||||||
|
@ -235,10 +143,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print('total training good images:', num_train_good)\n",
|
"print('Training good images: ', num_train_good)\n",
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
"print('Training bad images: ', num_train_bad)\n",
|
||||||
"print(\"--\")\n",
|
"print('Training images: ', total_train)\n",
|
||||||
"print(\"Total training images:\", total_train)"
|
"print('Validation good images: ', num_val_good)\n",
|
||||||
|
"print('Validation bad images: ', num_val_bad)\n",
|
||||||
|
"print('Validation images: ', total_val)\n",
|
||||||
|
"print('')\n",
|
||||||
|
"#print('Reduce training and validation set')\n",
|
||||||
|
"#total_train = 1000\n",
|
||||||
|
"#total_val = 1000\n",
|
||||||
|
"print('Training reduced to: ', total_train)\n",
|
||||||
|
"print('Validation reduced to: ', total_val)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -247,10 +163,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print('total validation good images:', num_val_good)\n",
|
"train_image_generator = ImageDataGenerator( rescale=1./255 )"
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -259,81 +172,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
"val_image_generator = ImageDataGenerator( rescale=1./255 )"
|
||||||
"#total_train = 100\n",
|
|
||||||
"#total_val = 100\n",
|
|
||||||
"print(\"Train =\")\n",
|
|
||||||
"print(total_train)\n",
|
|
||||||
"print(\"Validation =\")\n",
|
|
||||||
"print(total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Good results\n",
|
|
||||||
"batch_size = 128\n",
|
|
||||||
"epochs = 6\n",
|
|
||||||
"#\n",
|
|
||||||
"# Large Test\n",
|
|
||||||
"#batch_size = 512 # FAIL\n",
|
|
||||||
"#batch_size = 256 # FAIL\n",
|
|
||||||
"#batch_size = 192 # BEST SO FAR\n",
|
|
||||||
"#epochs = 16 # BEST SO FAR\n",
|
|
||||||
"#\n",
|
|
||||||
"# Fast, but reasonable answers\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 4\n",
|
|
||||||
"# Faster, but reasonable answers ?\n",
|
|
||||||
"#batch_size = 32\n",
|
|
||||||
"#epochs = 2\n",
|
|
||||||
"#\n",
|
|
||||||
"# Testing, faster more inaccurate results\n",
|
|
||||||
"#batch_size = 16\n",
|
|
||||||
"#epochs = 2\n",
|
|
||||||
"#\n",
|
|
||||||
"# Smallest set for testing\n",
|
|
||||||
"#batch_size = 1\n",
|
|
||||||
"#epochs = 1"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Half size\n",
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -385,7 +224,6 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"def plotImages(images_arr):\n",
|
"def plotImages(images_arr):\n",
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
||||||
" axes = axes.flatten()\n",
|
" axes = axes.flatten()\n",
|
||||||
|
@ -402,7 +240,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#plotImages(sample_train_images[0:3])"
|
"plotImages(sample_train_images[0:3])"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -411,7 +249,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#plotImages(sample_val_images[0:3])"
|
"plotImages(sample_val_images[0:3])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# If you need to kill tensorboad, when it says stuff like this:\n",
|
||||||
|
"# Reusing TensorBoard on port 6006 (pid 13650), started 0:04:20 ago. (Use '!kill 13650' to kill it.)\n",
|
||||||
|
"#!rm -rf /tmp/.tensorboard-info/"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -422,11 +271,18 @@
|
||||||
"source": [
|
"source": [
|
||||||
"%load_ext tensorboard\n",
|
"%load_ext tensorboard\n",
|
||||||
"!rm -rf ./logs/\n",
|
"!rm -rf ./logs/\n",
|
||||||
"#os.mkdir(\"logs\")\n",
|
"os.mkdir(\"logs\")\n",
|
||||||
"log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
"log_dir = \"logs\"\n",
|
||||||
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
"#log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")"
|
||||||
"#logdir = \"logs\"\n",
|
]
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=logdir)"
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=1, update_freq='batch')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -454,9 +310,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.compile(optimizer='adam',\n",
|
"wutoptimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True)\n",
|
||||||
" loss='binary_crossentropy',\n",
|
"wutloss = 'binary_crossentropy'\n",
|
||||||
" metrics=['accuracy'])"
|
"wutmetrics = ['accuracy']"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -465,7 +321,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.summary()"
|
"model.compile(optimizer=wutoptimizer,\n",
|
||||||
|
" loss=wutloss,\n",
|
||||||
|
" metrics=[wutmetrics])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#model.summary()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -483,7 +350,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%tensorboard --logdir logs/fit"
|
"%tensorboard --logdir logs"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -492,14 +359,30 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"history = model.fit_generator(\n",
|
"print(train_data_gen)\n",
|
||||||
|
"print(total_train)\n",
|
||||||
|
"print(batch_size)\n",
|
||||||
|
"print(epochs)\n",
|
||||||
|
"print(val_data_gen)\n",
|
||||||
|
"print(total_val)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"history = model.fit(\n",
|
||||||
" train_data_gen,\n",
|
" train_data_gen,\n",
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
" steps_per_epoch=total_train // batch_size,\n",
|
||||||
" epochs=epochs,\n",
|
" epochs=epochs,\n",
|
||||||
|
" verbose=1,\n",
|
||||||
" validation_data=val_data_gen,\n",
|
" validation_data=val_data_gen,\n",
|
||||||
" validation_steps=total_val // batch_size,\n",
|
" validation_steps=total_val // batch_size,\n",
|
||||||
|
" shuffle=True,\n",
|
||||||
" callbacks=[tensorboard_callback],\n",
|
" callbacks=[tensorboard_callback],\n",
|
||||||
" verbose=1\n",
|
" use_multiprocessing=False\n",
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -509,14 +392,37 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"acc = history.history['accuracy']\n",
|
"acc = history.history['accuracy']"
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
]
|
||||||
"\n",
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"val_acc = history.history['val_accuracy']"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"loss = history.history['loss']\n",
|
"loss = history.history['loss']\n",
|
||||||
"val_loss = history.history['val_loss']\n",
|
"val_loss = history.history['val_loss']\n",
|
||||||
"\n",
|
"\n",
|
||||||
"epochs_range = range(epochs)\n",
|
"epochs_range = range(epochs)\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"save_plot_dir = os.path.join('/srv/satnogs/data/models/', ENCODING)\n",
|
||||||
|
"os.makedirs(save_plot_dir, exist_ok=True)\n",
|
||||||
|
"plot_file=(\"wut-plot-\" + ENCODING + \".png\")\n",
|
||||||
|
"save_path_plot = os.path.join(save_plot_dir, plot_file)\n",
|
||||||
|
"print(save_path_plot)\n",
|
||||||
|
"\n",
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
"plt.figure(figsize=(8, 8))\n",
|
||||||
"plt.subplot(1, 2, 1)\n",
|
"plt.subplot(1, 2, 1)\n",
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
||||||
|
@ -529,6 +435,7 @@
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
||||||
"plt.legend(loc='upper right')\n",
|
"plt.legend(loc='upper right')\n",
|
||||||
"plt.title('Training and Validation Loss')\n",
|
"plt.title('Training and Validation Loss')\n",
|
||||||
|
"plt.savefig(save_path_plot)\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -538,61 +445,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"TRAINING info\")"
|
"print(\"TRAINING info\")\n",
|
||||||
]
|
"print(train_dir)\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_good_dir)\n",
|
"print(train_good_dir)\n",
|
||||||
"print(train_bad_dir)"
|
"print(train_bad_dir)\n",
|
||||||
]
|
"print(train_image_generator)\n",
|
||||||
},
|
"print(train_data_gen)\n",
|
||||||
{
|
"#print(sample_train_images)\n",
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_image_generator)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_data_gen)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#print(sample_train_images)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(history)"
|
"print(history)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -602,7 +461,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Save data here"
|
"h5_file=(\"wut-\" + ENCODING + \".h5\")\n",
|
||||||
|
"save_path_h5 = os.path.join('/srv/satnogs/data/models/', ENCODING, h5_file)\n",
|
||||||
|
"print(save_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -611,7 +472,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.save('data/models/wut-DUV.h5')"
|
"model.save(save_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -620,7 +481,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.save('data/models/wut-DUV.tf')"
|
"tf_modeldir=(\"wut-\" + ENCODING + \".tf\")\n",
|
||||||
|
"save_path_tf = os.path.join('/srv/satnogs/data/models/', ENCODING, tf_modeldir)\n",
|
||||||
|
"print(save_path_tf)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -629,13 +492,38 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The End"
|
"model.save(save_path_tf)"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#plot_model(model, show_shapes=True, show_layer_names=True, expand_nested=True, dpi=72, to_file='/srv/satnogs/data/models/FM/plot_model.png')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#SVG(model_to_dot(model).create(prog='dot', format='svg'))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -649,7 +537,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -6,28 +6,14 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
"# wut-web-alpha --- What U Think? Web App: SatNOGS Observation AI, makes predictions. ALPHA.\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# GPLv3+\n",
|
"# GPLv3+\n",
|
||||||
"\n",
|
"\n",
|
||||||
"#from collections import defaultdict\n",
|
"#from collections import defaultdict\n",
|
||||||
"#import PIL as pil\n",
|
"#import PIL as pil"
|
||||||
"\n",
|
|
||||||
"import json\n",
|
|
||||||
"import os\n",
|
|
||||||
"import random\n",
|
|
||||||
"import tempfile\n",
|
|
||||||
"import shutil\n",
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import ipywidgets as wg\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"from IPython.display import display, Image\n",
|
|
||||||
"from IPython.utils import text\n",
|
|
||||||
"from PIL import Image as im\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -36,7 +22,60 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import json\n",
|
||||||
|
"import random\n",
|
||||||
|
"import tempfile\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"import tensorflow as tf\n",
|
||||||
|
"import ipywidgets as wg\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from ipywidgets import HBox, Label\n",
|
||||||
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
|
"from ipywidgets import Layout, Button, Box\n",
|
||||||
|
"from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider\n",
|
||||||
|
"from ipywidgets import AppLayout\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.utils import text\n",
|
||||||
|
"from PIL import Image as im\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def head_pic():\n",
|
||||||
|
" display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -46,25 +85,9 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%HTML\n",
|
"%%HTML\n",
|
||||||
"<H1><B>wut?<B></H1>"
|
"<H1><B>wut? ALPHA DEVELOPMENT VERSION<B></H1>\n",
|
||||||
]
|
"Main site: <A HREF=\"https://wut.spacecruft.org/\">wut.spacecruft.org</A><BR>\n",
|
||||||
},
|
"Test site: <A HREF=\"https://wut-beta.spacecruft.org/\">wut-beta.spacecruft.org</A>"
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
|
||||||
"print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
|
||||||
"print(\"SatNOGS network in December, 2019.\")\n",
|
|
||||||
"print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
|
||||||
"print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"Source Code:\")\n",
|
|
||||||
"print(\"https://spacecruft.org/spacecruft/satnogs-wut\")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -77,11 +100,9 @@
|
||||||
"IMG_WIDTH = 804\n",
|
"IMG_WIDTH = 804\n",
|
||||||
"batch_size = 32\n",
|
"batch_size = 32\n",
|
||||||
"minobsid = 1292461\n",
|
"minobsid = 1292461\n",
|
||||||
"#maxobsid = 1470525\n",
|
"maxobsid = 1470525\n",
|
||||||
"maxobsid = 1591638\n",
|
"#maxobsid = 1591638\n",
|
||||||
"base_dir = ('/srv/wut/data')\n",
|
"base_dir = ('/srv/wut/data')"
|
||||||
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
|
||||||
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -90,8 +111,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"def site_intro():\n",
|
||||||
"model = load_model(model_file)"
|
" print(\"wut? --- What U Think? SatNOGS Observation AI development version.\")\n",
|
||||||
|
" print(\"Source Code: https://spacecruft.org/spacecruft/satnogs-wut\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#site_intro()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -108,7 +139,8 @@
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
||||||
" shuffle=True,\n",
|
" shuffle=True,\n",
|
||||||
" class_mode='binary')\n",
|
" class_mode='binary')\n",
|
||||||
" return test_data_gen"
|
" return test_data_gen\n",
|
||||||
|
"# Get rid of, but %%capture fails: Found 1 images belonging to 1 classes."
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -117,7 +149,6 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
|
||||||
"def rm_image_tmp(test_dir):\n",
|
"def rm_image_tmp(test_dir):\n",
|
||||||
" #print('Not removed:', test_dir)\n",
|
" #print('Not removed:', test_dir)\n",
|
||||||
" shutil.rmtree(test_dir)"
|
" shutil.rmtree(test_dir)"
|
||||||
|
@ -129,15 +160,15 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture --no-stderr --no-stdout\n",
|
"def gen_image_tmp(obs_waterfalltmp, showwater):\n",
|
||||||
"def gen_image_tmp(obs_waterfalltmp):\n",
|
|
||||||
" tmp_dir = tempfile.mkdtemp()\n",
|
" tmp_dir = tempfile.mkdtemp()\n",
|
||||||
" test_dir = os.path.join(tmp_dir)\n",
|
" test_dir = os.path.join(tmp_dir)\n",
|
||||||
" os.makedirs(test_dir + '/unvetted', exist_ok=True)\n",
|
" os.makedirs(test_dir + '/unvetted', exist_ok=True)\n",
|
||||||
" shutil.copy(obs_waterfalltmp, test_dir + '/unvetted/') \n",
|
" shutil.copy(obs_waterfalltmp, test_dir + '/unvetted/') \n",
|
||||||
" \n",
|
" \n",
|
||||||
" img = im.open(obs_waterfalltmp).resize( (100,200))\n",
|
" img = im.open(obs_waterfalltmp).resize( (100,200))\n",
|
||||||
" display(img)\n",
|
" if showwater == True:\n",
|
||||||
|
" display(img)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" return test_dir"
|
" return test_dir"
|
||||||
]
|
]
|
||||||
|
@ -148,8 +179,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"def obs_wutsay(test_data_gen, model):\n",
|
||||||
"def obs_wutsay(test_data_gen):\n",
|
|
||||||
" prediction = model.predict(\n",
|
" prediction = model.predict(\n",
|
||||||
" x=test_data_gen,\n",
|
" x=test_data_gen,\n",
|
||||||
" verbose=0)\n",
|
" verbose=0)\n",
|
||||||
|
@ -197,21 +227,48 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"def wutObs(datObs, showwater):\n",
|
||||||
"def doallthethings(datObs):\n",
|
" if int(datObs) > ( minobsid - 1 ) and int(datObs) < ( maxobsid + 1):\n",
|
||||||
|
" doallthethings(datObs, showwater)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def doallthethings(datObs, showwater):\n",
|
||||||
"\n",
|
"\n",
|
||||||
" obs_waterfall=get_obs_var('waterfall', datObs) \n",
|
" obs_waterfall=get_obs_var('waterfall', datObs) \n",
|
||||||
" obs_waterfallpic=os.path.basename(obs_waterfall)\n",
|
" obs_waterfallpic=os.path.basename(obs_waterfall)\n",
|
||||||
" obs_waterfalltmp = os.path.join('/srv/satnogs/download', str(get_obs_var('id', datObs)), obs_waterfallpic)\n",
|
" obs_waterfalltmp = os.path.join('/srv/satnogs/download', str(get_obs_var('id', datObs)), obs_waterfallpic)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" test_dir=gen_image_tmp(obs_waterfalltmp);\n",
|
"# XXX NameError\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" model=wut_model(txmode)\n",
|
||||||
|
" \n",
|
||||||
|
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" elif get_obs_var('transmitter_mode', datObs) == 'CW':\n",
|
||||||
|
" txmode='CW'\n",
|
||||||
|
" else:\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" \n",
|
||||||
|
" test_dir=gen_image_tmp(obs_waterfalltmp, showwater);\n",
|
||||||
" test_data_gen=gen_image(obs_waterfalltmp, test_dir);\n",
|
" test_data_gen=gen_image(obs_waterfalltmp, test_dir);\n",
|
||||||
" \n",
|
"\n",
|
||||||
" prediction_bool=obs_wutsay(test_data_gen);\n",
|
"# XXX NameError: name 'model' is not defined\n",
|
||||||
|
" prediction_bool=obs_wutsay(test_data_gen, model);\n",
|
||||||
"\n",
|
"\n",
|
||||||
" print()\n",
|
" print()\n",
|
||||||
" print('Observation ID: ', get_obs_var('id', datObs))\n",
|
" print('Observation ID: ', get_obs_var('id', datObs))\n",
|
||||||
" print('Encoding: ', get_obs_var('transmitter_mode', datObs))\n",
|
" print('Encoding: ', get_obs_var('transmitter_mode', datObs),end='')\n",
|
||||||
|
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
||||||
|
" XXX=0\n",
|
||||||
|
" print(\" -- Using DUV training model.\")\n",
|
||||||
|
" else:\n",
|
||||||
|
" print(\" -- wut has not been trained on\", get_obs_var('transmitter_mode', datObs), \"encodings.\")\n",
|
||||||
" print('Human rating: ', get_obs_var('vetted_status', datObs))\n",
|
" print('Human rating: ', get_obs_var('vetted_status', datObs))\n",
|
||||||
" if prediction_bool[0] == False:\n",
|
" if prediction_bool[0] == False:\n",
|
||||||
" rating = 'bad'\n",
|
" rating = 'bad'\n",
|
||||||
|
@ -219,10 +276,6 @@
|
||||||
" rating = 'good'\n",
|
" rating = 'good'\n",
|
||||||
" print('wut AI rating: %s' % (rating)) \n",
|
" print('wut AI rating: %s' % (rating)) \n",
|
||||||
" print()\n",
|
" print()\n",
|
||||||
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
|
||||||
" print(\"Using DUV training model.\")\n",
|
|
||||||
" else:\n",
|
|
||||||
" print(\"NOTE: wut has not been trained on\", get_obs_var('transmitter_mode', datObs), \"encodings.\")\n",
|
|
||||||
" print('https://network.satnogs.org/observations/' + str(get_obs_var('id', datObs)))\n",
|
" print('https://network.satnogs.org/observations/' + str(get_obs_var('id', datObs)))\n",
|
||||||
" #!cat $obsjsonfile\n",
|
" #!cat $obsjsonfile\n",
|
||||||
" rm_image_tmp(test_dir)"
|
" rm_image_tmp(test_dir)"
|
||||||
|
@ -234,10 +287,11 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"def wut_model(txmode):\n",
|
||||||
"def wutObs(datObs):\n",
|
" model_file = os.path.join(base_dir, 'models', (txmode), 'wut-train-cluster.tf')\n",
|
||||||
" if int(datObs) > ( minobsid - 1 ) and int(datObs) < ( maxobsid + 1):\n",
|
" model = load_model(model_file)\n",
|
||||||
" doallthethings(datObs)"
|
" \n",
|
||||||
|
" return model"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -246,11 +300,12 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"def display_main():\n",
|
||||||
"def display_results():\n",
|
|
||||||
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
||||||
" wutObs_slide = wg.IntText(value='1292461')\n",
|
" rand_obsid=random.randint(minobsid,maxobsid)\n",
|
||||||
" wg.interact(wutObs, datObs=wutObs_slide)"
|
" wutObs_slide = wg.IntText(value=rand_obsid, description=' ')\n",
|
||||||
|
" wutObs_check = wg.Checkbox(value=True, disabled=False)\n",
|
||||||
|
" wg.interact(wutObs, datObs=wutObs_slide, showwater=wutObs_check.value)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -259,13 +314,59 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"display_results()"
|
"head_pic()"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"site_intro()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def load_txmode_models():\n",
|
||||||
|
" model_txmode_DUV=wut_model('DUV')\n",
|
||||||
|
" model_txmode_CW=wut_model('CW')\n",
|
||||||
|
" model_txmode_other=wut_model('DUV')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"load_txmode_models()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display_main()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -279,7 +380,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -13,10 +13,34 @@
|
||||||
"# GPLv3+\n",
|
"# GPLv3+\n",
|
||||||
"\n",
|
"\n",
|
||||||
"#from collections import defaultdict\n",
|
"#from collections import defaultdict\n",
|
||||||
"#import PIL as pil\n",
|
"#import PIL as pil"
|
||||||
"\n",
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"import json\n",
|
"import json\n",
|
||||||
"import os\n",
|
|
||||||
"import random\n",
|
"import random\n",
|
||||||
"import tempfile\n",
|
"import tempfile\n",
|
||||||
"import shutil\n",
|
"import shutil\n",
|
||||||
|
@ -26,8 +50,17 @@
|
||||||
"from IPython.display import display, Image\n",
|
"from IPython.display import display, Image\n",
|
||||||
"from IPython.utils import text\n",
|
"from IPython.utils import text\n",
|
||||||
"from PIL import Image as im\n",
|
"from PIL import Image as im\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.models import load_model\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator"
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -46,25 +79,9 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%HTML\n",
|
"%%HTML\n",
|
||||||
"<H1><B>wut?<B></H1>"
|
"<H1><B>wut? BETA TEST VERSION<B></H1>\n",
|
||||||
]
|
"Main site: <A HREF=\"https://wut.spacecruft.org/\">wut.spacecruft.org</A><BR>\n",
|
||||||
},
|
"Development site: <A HREF=\"https://wut-alpha.spacecruft.org/\">wut-alpha.spacecruft.org</A>"
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
|
||||||
"print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
|
||||||
"print(\"SatNOGS network in December, 2019.\")\n",
|
|
||||||
"print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
|
||||||
"print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"Source Code:\")\n",
|
|
||||||
"print(\"https://spacecruft.org/spacecruft/satnogs-wut\")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -77,13 +94,31 @@
|
||||||
"IMG_WIDTH = 804\n",
|
"IMG_WIDTH = 804\n",
|
||||||
"batch_size = 32\n",
|
"batch_size = 32\n",
|
||||||
"minobsid = 1292461\n",
|
"minobsid = 1292461\n",
|
||||||
"#maxobsid = 1470525\n",
|
"maxobsid = 1470525\n",
|
||||||
"maxobsid = 1591638\n",
|
|
||||||
"base_dir = ('/srv/wut/data')\n",
|
"base_dir = ('/srv/wut/data')\n",
|
||||||
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
||||||
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')"
|
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def site_intro():\n",
|
||||||
|
" print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
||||||
|
" print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
||||||
|
" print(\"SatNOGS network in December, 2019.\")\n",
|
||||||
|
" print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
||||||
|
" print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Source Code:\")\n",
|
||||||
|
" print(\"https://spacecruft.org/spacecruft/satnogs-wut\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
|
@ -91,7 +126,10 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"%%capture\n",
|
||||||
"model = load_model(model_file)"
|
"def wut_model(model_file):\n",
|
||||||
|
" model = load_model(model_file)\n",
|
||||||
|
" \n",
|
||||||
|
" return model"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -247,9 +285,10 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%%capture\n",
|
"%%capture\n",
|
||||||
"def display_results():\n",
|
"def display_main():\n",
|
||||||
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
||||||
" wutObs_slide = wg.IntText(value='1292461')\n",
|
" rand_obsid=random.randint(minobsid,maxobsid)\n",
|
||||||
|
" wutObs_slide = wg.IntText(value=rand_obsid)\n",
|
||||||
" wg.interact(wutObs, datObs=wutObs_slide)"
|
" wg.interact(wutObs, datObs=wutObs_slide)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -259,13 +298,31 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"display_results()"
|
"site_intro()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model=wut_model(model_file)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display_main()"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -279,7 +336,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -1,287 +0,0 @@
|
||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
|
||||||
"#\n",
|
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
|
||||||
"#\n",
|
|
||||||
"# GPLv3+\n",
|
|
||||||
"\n",
|
|
||||||
"#from collections import defaultdict\n",
|
|
||||||
"#import PIL as pil\n",
|
|
||||||
"\n",
|
|
||||||
"import json\n",
|
|
||||||
"import os\n",
|
|
||||||
"import random\n",
|
|
||||||
"import tempfile\n",
|
|
||||||
"import shutil\n",
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import ipywidgets as wg\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"from IPython.display import display, Image\n",
|
|
||||||
"from IPython.utils import text\n",
|
|
||||||
"from PIL import Image as im\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%HTML\n",
|
|
||||||
"<H1><B>wut?<B></H1>"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
|
||||||
"print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
|
||||||
"print(\"SatNOGS network in December, 2019.\")\n",
|
|
||||||
"print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
|
||||||
"print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
|
||||||
"print(\"\")\n",
|
|
||||||
"print(\"Source Code:\")\n",
|
|
||||||
"print(\"https://spacecruft.org/spacecruft/satnogs-wut\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 19,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH = 804\n",
|
|
||||||
"batch_size = 32\n",
|
|
||||||
"minobsid = 1292461\n",
|
|
||||||
"maxobsid = 1470525\n",
|
|
||||||
"#maxobsid = 1591638 # 2020-01-24\n",
|
|
||||||
"base_dir = ('/srv/wut/data')\n",
|
|
||||||
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
|
||||||
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"model = load_model(model_file)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def gen_image(test_data_gen,test_dir):\n",
|
|
||||||
" test_image_gen = ImageDataGenerator(rescale=1./255);\n",
|
|
||||||
" test_data_gen = test_image_gen.flow_from_directory(batch_size=1,\n",
|
|
||||||
" directory=test_dir,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" shuffle=True,\n",
|
|
||||||
" class_mode='binary')\n",
|
|
||||||
" return test_data_gen"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def rm_image_tmp(test_dir):\n",
|
|
||||||
" #print('Not removed:', test_dir)\n",
|
|
||||||
" shutil.rmtree(test_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture --no-stderr --no-stdout\n",
|
|
||||||
"def gen_image_tmp(obs_waterfalltmp):\n",
|
|
||||||
" tmp_dir = tempfile.mkdtemp()\n",
|
|
||||||
" test_dir = os.path.join(tmp_dir)\n",
|
|
||||||
" os.makedirs(test_dir + '/unvetted', exist_ok=True)\n",
|
|
||||||
" shutil.copy(obs_waterfalltmp, test_dir + '/unvetted/') \n",
|
|
||||||
" \n",
|
|
||||||
" img = im.open(obs_waterfalltmp).resize( (100,200))\n",
|
|
||||||
" display(img)\n",
|
|
||||||
"\n",
|
|
||||||
" return test_dir"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def obs_wutsay(test_data_gen):\n",
|
|
||||||
" prediction = model.predict(\n",
|
|
||||||
" x=test_data_gen,\n",
|
|
||||||
" verbose=0)\n",
|
|
||||||
" predictions=[]\n",
|
|
||||||
" prediction_bool = (prediction >0.8)\n",
|
|
||||||
" predictions = prediction_bool.astype(int)\n",
|
|
||||||
" \n",
|
|
||||||
" return prediction_bool"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_obs_dict(datObs):\n",
|
|
||||||
" obsjsonfile=('/srv/satnogs/download/' + format(datObs) + '/' + format(datObs) + '.json')\n",
|
|
||||||
" with open(obsjsonfile) as f:\n",
|
|
||||||
" content = f.read()\n",
|
|
||||||
" data = json.loads(content)\n",
|
|
||||||
" res = {x : data[x] for x in range(len(data))}\n",
|
|
||||||
" res2 = dict(enumerate(data))\n",
|
|
||||||
" obs_dict=(res2[0])\n",
|
|
||||||
" \n",
|
|
||||||
" return obs_dict"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_obs_var(var, datObs):\n",
|
|
||||||
" obs_dict=get_obs_dict(datObs);\n",
|
|
||||||
" obs_var=(obs_dict[(var)])\n",
|
|
||||||
" \n",
|
|
||||||
" return obs_var"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def doallthethings(datObs):\n",
|
|
||||||
"\n",
|
|
||||||
" obs_waterfall=get_obs_var('waterfall', datObs) \n",
|
|
||||||
" obs_waterfallpic=os.path.basename(obs_waterfall)\n",
|
|
||||||
" obs_waterfalltmp = os.path.join('/srv/satnogs/download', str(get_obs_var('id', datObs)), obs_waterfallpic)\n",
|
|
||||||
"\n",
|
|
||||||
" test_dir=gen_image_tmp(obs_waterfalltmp);\n",
|
|
||||||
" test_data_gen=gen_image(obs_waterfalltmp, test_dir);\n",
|
|
||||||
" \n",
|
|
||||||
" prediction_bool=obs_wutsay(test_data_gen);\n",
|
|
||||||
"\n",
|
|
||||||
" print()\n",
|
|
||||||
" print('Observation ID: ', get_obs_var('id', datObs))\n",
|
|
||||||
" print('Encoding: ', get_obs_var('transmitter_mode', datObs))\n",
|
|
||||||
" print('Human rating: ', get_obs_var('vetted_status', datObs))\n",
|
|
||||||
" if prediction_bool[0] == False:\n",
|
|
||||||
" rating = 'bad'\n",
|
|
||||||
" else:\n",
|
|
||||||
" rating = 'good'\n",
|
|
||||||
" print('wut AI rating: %s' % (rating)) \n",
|
|
||||||
" print()\n",
|
|
||||||
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
|
||||||
" print(\"Using DUV training model.\")\n",
|
|
||||||
" else:\n",
|
|
||||||
" print(\"NOTE: wut has not been trained on\", get_obs_var('transmitter_mode', datObs), \"encodings.\")\n",
|
|
||||||
" print('https://network.satnogs.org/observations/' + str(get_obs_var('id', datObs)))\n",
|
|
||||||
" #!cat $obsjsonfile\n",
|
|
||||||
" rm_image_tmp(test_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def wutObs(datObs):\n",
|
|
||||||
" if int(datObs) > ( minobsid - 1 ) and int(datObs) < ( maxobsid + 1):\n",
|
|
||||||
" doallthethings(datObs)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%%capture\n",
|
|
||||||
"def display_results():\n",
|
|
||||||
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
|
||||||
" wutObs_slide = wg.IntText(value='1292461')\n",
|
|
||||||
" wg.interact(wutObs, datObs=wutObs_slide)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"display_results()"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
|
@ -3,25 +3,69 @@
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# GPLv3+\n",
|
"# GPLv3+"
|
||||||
"\n",
|
]
|
||||||
"import os\n",
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"import random\n",
|
"import random\n",
|
||||||
"import tempfile\n",
|
"import tempfile\n",
|
||||||
"import shutil\n",
|
"import shutil\n",
|
||||||
"import tensorflow as tf\n",
|
"import tensorflow as tf\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
"from IPython.display import display, Image\n",
|
"from IPython.display import display, Image\n",
|
||||||
"from IPython.utils import text\n",
|
"from IPython.utils import text"
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
]
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
},
|
||||||
"\n",
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -158,7 +202,7 @@
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -172,7 +216,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -63,6 +63,15 @@
|
||||||
"import os"
|
"import os"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
|
@ -78,43 +87,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"import tensorflow as tf\n",
|
||||||
]
|
"from tensorflow import keras\n",
|
||||||
},
|
"from tensorflow.keras import layers\n",
|
||||||
{
|
"from tensorflow.keras import optimizers\n",
|
||||||
"cell_type": "code",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"execution_count": null,
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"metadata": {},
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"outputs": [],
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"source": [
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"from tensorflow.keras.models import load_model\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow.keras.models import Model\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.utils import plot_model"
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -178,9 +167,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
"train_dir = os.path.join('/srv/satnogs/data/', 'train')\n",
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
"val_dir = os.path.join('/srv/satnogs/data/', 'val')\n",
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"test_dir = os.path.join('/srv/satnogs/data/', 'test')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -752,7 +741,7 @@
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -766,7 +755,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
opencv-python
|
||||||
|
pandas
|
||||||
|
pillow
|
||||||
|
portpicker
|
||||||
|
setuptools
|
||||||
|
simplejson
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow
|
||||||
|
#tensorflow_gpu
|
|
@ -0,0 +1,15 @@
|
||||||
|
ipython_blocking
|
||||||
|
ipywidgets
|
||||||
|
jupyterlab
|
||||||
|
matplotlib
|
||||||
|
pandas
|
||||||
|
pillow
|
||||||
|
pydot
|
||||||
|
seaborn
|
||||||
|
simplejson
|
||||||
|
sklearn
|
||||||
|
voila
|
||||||
|
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow
|
||||||
|
#tensorflow_gpu
|
|
@ -0,0 +1,15 @@
|
||||||
|
black[jupyter]
|
||||||
|
internetarchive
|
||||||
|
ipywidgets
|
||||||
|
jupyterlab
|
||||||
|
matplotlib
|
||||||
|
pandas
|
||||||
|
pydot
|
||||||
|
seaborn
|
||||||
|
sklearn
|
||||||
|
tensorboard
|
||||||
|
tensorboard-plugin-profile
|
||||||
|
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow_gpu
|
||||||
|
#tensorflow
|
|
@ -0,0 +1,7 @@
|
||||||
|
all:
|
||||||
|
mkdir -p ../bin
|
||||||
|
cp -p wut wut-aria-active wut-aria-add wut-aria-daemon wut-aria-info wut-aria-methods wut-aria-shutdown wut-aria-stat wut-aria-stopped wut-aria-waiting wut-audio-archive wut-audio-sha1 wut-compare wut-compare-all wut-compare-tx wut-compare-txmode wut-compare-txmode-csv wut-dl-sort wut-dl-sort-tx wut-dl-sort-txmode wut-dl-sort-txmode-all wut-files wut-files-data wut-files-data-all wut-ia-sha1 wut-ia-torrents wut-img-ck.py wut-ml wut-ml-auto wut-ml-load wut-ml-save wut-obs wut-ogg2wav wut-review-staging wut-rm-random wut-tf wut-tf.py wut-water wut-water-range wut-worker wut-worker-mas wut-worker-mas.py wut-worker.py ../bin/
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -fr ../bin
|
||||||
|
|
|
@ -9,15 +9,17 @@
|
||||||
# Example:
|
# Example:
|
||||||
# wut 1456893
|
# wut 1456893
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
OBSID="$1"
|
OBSID="$1"
|
||||||
|
|
||||||
rm -rf data/test
|
rm -rf data/test
|
||||||
mkdir -p data/test/unvetted
|
mkdir -p data/test/unvetted
|
||||||
|
|
||||||
./wut-water $OBSID
|
wut-water $OBSID
|
||||||
|
|
||||||
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || echo "failed"
|
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || echo "failed"
|
||||||
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || exit
|
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || exit
|
||||||
cp -p download/$OBSID/waterfall_$OBSID_*.png data/test/unvetted/
|
cp -p download/$OBSID/waterfall_$OBSID_*.png data/test/unvetted/
|
||||||
./wut-ml 2>/dev/null | grep -e ^Observation -e "^\[\[" | sed -e 's/\[\[//' -e 's/\]\]//' -e 's/Observation: //g'
|
wut-ml 2>/dev/null | grep -e ^Observation -e "^\[\[" | sed -e 's/\[\[//' -e 's/\]\]//' -e 's/Observation: //g'
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
active=s.aria2.tellActive("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(active)
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
# All torrents
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-*/satnogs-observations-*_archive.torrent')))
|
||||||
|
|
||||||
|
# Added torrents
|
||||||
|
# dt-10
|
||||||
|
torrents=sorted(list(path.glob('**/satnogs-observations-000000001-000010000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0001?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0002?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0003?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0004?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0005?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0006?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0007?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0008?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0009?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
|
||||||
|
for i in torrents:
|
||||||
|
print(i.name)
|
||||||
|
s.aria2.addTorrent("token:yajnuAdCemNathNojdi",
|
||||||
|
xmlrpclib.Binary(open(i, mode='rb').read()))
|
||||||
|
time.sleep(10)
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
mkdir -p ~/log /srv/dl
|
||||||
|
|
||||||
|
ulimit -n 8192
|
||||||
|
|
||||||
|
aria2c \
|
||||||
|
--daemon=true \
|
||||||
|
--enable-rpc=true \
|
||||||
|
--dir=/srv/dl \
|
||||||
|
--rpc-listen-port=4800 \
|
||||||
|
--rpc-listen-all=false \
|
||||||
|
--rpc-secret=`cat /home/jebba/.aria-secret` \
|
||||||
|
--disable-ipv6=true \
|
||||||
|
--disk-cache=128M \
|
||||||
|
--file-allocation=falloc \
|
||||||
|
--log-level=notice \
|
||||||
|
--log=/home/jebba/log/aria.log \
|
||||||
|
--bt-max-open-files=1000 \
|
||||||
|
--bt-max-peers=1000 \
|
||||||
|
--continue=true \
|
||||||
|
--follow-torrent=mem \
|
||||||
|
--rpc-save-upload-metadata=false \
|
||||||
|
--max-concurrent-downloads=100 \
|
||||||
|
--bt-max-open-files=50000 \
|
||||||
|
--bt-max-peers=0 \
|
||||||
|
--allow-overwrite=true \
|
||||||
|
--max-download-result=0 \
|
||||||
|
--enable-mmap=true
|
||||||
|
|
||||||
|
exit
|
||||||
|
|
||||||
|
--deferred-input=true \
|
||||||
|
--enable-mmap
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
info=s.aria2.getSessionInfo("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(info)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
methods=s.system.listMethods()
|
||||||
|
|
||||||
|
pprint((sorted)(methods))
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
shutdown=s.aria2.shutdown("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(shutdown)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
stat=s.aria2.getGlobalStat("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(stat)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
stopped=s.aria2.tellStopped("token:yajnuAdCemNathNojdi", 0, 9999)
|
||||||
|
|
||||||
|
pprint(stopped)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
waiting=s.aria2.tellWaiting("token:yajnuAdCemNathNojdi", 0, 9999)
|
||||||
|
|
||||||
|
pprint(waiting)
|
||||||
|
|
|
@ -29,7 +29,7 @@
|
||||||
# XXX Should check input is sane...
|
# XXX Should check input is sane...
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
OBSIDMIN="$1"
|
OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
|
@ -12,12 +12,12 @@
|
||||||
OBSID="$1"
|
OBSID="$1"
|
||||||
|
|
||||||
# Download observation
|
# Download observation
|
||||||
./wut-water $OBSID
|
wut-water $OBSID
|
||||||
|
|
||||||
# Get previous rating
|
# Get previous rating
|
||||||
VET=`cat download/$OBSID/$OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
VET=`cat download/$OBSID/$OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
echo "Vetted Status: $VET"
|
echo "Vetted Status: $VET"
|
||||||
|
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
./wut $OBSID
|
wut $OBSID
|
||||||
|
|
|
@ -25,7 +25,7 @@ do
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
|
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VET=`./wut $OBSID | cut -f 2 -d " "`
|
WUT_VET=`wut $OBSID | cut -f 2 -d " "`
|
||||||
echo -n "Wut: $WUT_VET "
|
echo -n "Wut: $WUT_VET "
|
||||||
if [ $VET = $WUT_VET ] ; then
|
if [ $VET = $WUT_VET ] ; then
|
||||||
let CORRECT=$CORRECT+1
|
let CORRECT=$CORRECT+1
|
|
@ -27,7 +27,7 @@ do
|
||||||
echo -n "$OBSID "
|
echo -n "$OBSID "
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID`
|
WUT_VETS=`wut $OBSID`
|
||||||
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
||||||
echo -n "$WUT_VET, "
|
echo -n "$WUT_VET, "
|
|
@ -32,7 +32,7 @@ do
|
||||||
echo -n "$OBSID "
|
echo -n "$OBSID "
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID | cut -f 2 -d " "`
|
WUT_VETS=`wut $OBSID | cut -f 2 -d " "`
|
||||||
WUT_VET=`echo $WUT_VETS | tail -1 | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | tail -1 | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | head -1`
|
WUT_RATE=`echo $WUT_VETS | head -1`
|
||||||
echo -n "Wut: $WUT_VET "
|
echo -n "Wut: $WUT_VET "
|
|
@ -36,7 +36,7 @@ do
|
||||||
echo -n "$OBSID, "
|
echo -n "$OBSID, "
|
||||||
echo -n "$VET, "
|
echo -n "$VET, "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID`
|
WUT_VETS=`wut $OBSID`
|
||||||
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
||||||
echo -n "$WUT_VET, "
|
echo -n "$WUT_VET, "
|
|
@ -22,9 +22,11 @@ OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing data/ subdirectories"
|
|
@ -20,6 +20,8 @@
|
||||||
#
|
#
|
||||||
# Possible vetted_status: bad, failed, good, null, unknown.
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
OBSTX="$1"
|
OBSTX="$1"
|
||||||
OBSIDMIN="$2"
|
OBSIDMIN="$2"
|
||||||
OBSIDMAX="$3"
|
OBSIDMAX="$3"
|
||||||
|
@ -27,7 +29,7 @@ OBSID=$OBSIDMIN
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing data/ subdirectories"
|
|
@ -1,20 +1,29 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# wut-dl-sort-txmode
|
# wut-dl-sort-txmode
|
||||||
#
|
#
|
||||||
|
# XXX This script removes directories in data/ !!! XXX
|
||||||
|
#
|
||||||
# Populates the data/ directory from the download/dir.
|
# Populates the data/ directory from the download/dir.
|
||||||
# Does it just for a specific transmitter mode (encoding)
|
# Does it just for a specific transmitter mode (encoding)
|
||||||
# Available encodings:
|
|
||||||
# AFSK AFSK1k2 AHRPT APT BPSK BPSK1k2 BPSK9k6 BPSK12k5 BPSK400 CERTO CW DUV
|
|
||||||
# FFSK1k2 FM FSK1k2 FSK4k8 FSK9k6 FSK19k2 GFSK1k2 GFSK2k4 GFSK4k8 GFSK9k6
|
|
||||||
# GFSK19k2 GFSK Rktr GMSK GMSK1k2 GMSK2k4 GMSK4k8 GMSK9k6 GMSK19k2 HRPT LRPT
|
|
||||||
# MSK1k2 MSK2k4 MSK4k8 PSK PSK31 SSTV USB WSJT
|
|
||||||
#
|
#
|
||||||
# XXX This script removes directories in data/ !!! XXX
|
# Available encodings:
|
||||||
|
# 4FSK AFSK_TUBiX10 AFSK AHRPT AM APT ASK BPSK_PMT-A3 BPSK CERTO CW DBPSK DOKA
|
||||||
|
# DPSK DQPSK DSTAR DUV DVB-S2 FFSK FMN FM FSK_AX.25_G3RUH FSK_AX.100_Mode_5
|
||||||
|
# FSK_AX.100_Mode_6 FSK GFSK_Rktr GFSK GFSK/BPSK GMSK_USP GMSK HRPT LRPT LSB
|
||||||
|
# LoRa MFSK MSK_AX.100_Mode_5 MSK_AX.100_Mode_6 MSK OFDM OQPSK PSK31 PSK63 PSK
|
||||||
|
# QPSK31 QPSK63 QPSK SSTV USB WSJT
|
||||||
|
#
|
||||||
|
# Encoding list generator:
|
||||||
|
# for i in `curl --silent https://db.satnogs.org/api/modes/ | jq '.[] | .name' | sort -V | sed -e 's/"//g' -e 's/ /_/g' -e 's/\//_/g'` ; do echo -n "$i " ; done ; echo
|
||||||
#
|
#
|
||||||
# Usage:
|
# Usage:
|
||||||
# wut-dl-sort-txmode [Encoding] [Minimum Observation ID] [Maximum Observation ID]
|
# wut-dl-sort-txmode [Encoding] [Minimum Observation ID] [Maximum Observation ID]
|
||||||
# Example:
|
# Example:
|
||||||
# wut-dl-sort-txmode CW 1467000 1470000
|
# wut-dl-sort-txmode CW 1467000 1470000
|
||||||
|
# For December, 2019 Example:
|
||||||
|
# wut-dl-sort-txmode CW 1292461 1470525
|
||||||
|
# For July, 2022 Example:
|
||||||
|
# wut-dl-sort-txmode BPSK1k2 6154228 6283338
|
||||||
#
|
#
|
||||||
# * Takes the files in the download/ dir.
|
# * Takes the files in the download/ dir.
|
||||||
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
||||||
|
@ -23,25 +32,31 @@
|
||||||
#
|
#
|
||||||
# Possible vetted_status: bad, failed, good, null, unknown.
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
|
||||||
OBSENC="$1"
|
OBSENC="$1"
|
||||||
OBSIDMIN="$2"
|
OBSIDMIN="$2"
|
||||||
OBSIDMAX="$3"
|
OBSIDMAX="$3"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
||||||
|
DATADIR="/srv/satnogs/data/txmodes/$OBSENC"
|
||||||
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
|
mkdir -p $DATADIR
|
||||||
|
cd $DATADIR || exit
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing subdirectories"
|
||||||
rm -rf data/train data/val
|
rm -rf train/ val/
|
||||||
# Create new empty dirs
|
# Create new empty dirs
|
||||||
mkdir -p data/train/good data/train/bad data/train/failed
|
mkdir -p train/good/ train/bad/ train/failed/
|
||||||
mkdir -p data/val/good data/val/bad data/val/failed
|
mkdir -p val/good/ val/bad/ val/failed/
|
||||||
|
|
||||||
# Then parse each file and link appropriately
|
# Then parse each file and link appropriately
|
||||||
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
||||||
cd download/ || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
||||||
while [ $OBSID -lt $OBSIDMAX ]
|
while [ $OBSID -lt $OBSIDMAX ]
|
||||||
do cd $OBSID
|
do cd $OBSID
|
||||||
|
@ -55,11 +70,11 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
CLASS_DIR="val"
|
CLASS_DIR="val"
|
||||||
fi
|
fi
|
||||||
case "$VET" in
|
case "$VET" in
|
||||||
bad) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
bad) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
good) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
good) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
failed) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
failed) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
null) echo "null, not copying"
|
null) echo "null, not copying"
|
||||||
;;
|
;;
|
||||||
|
@ -70,3 +85,4 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
let OBSID=$OBSID+1
|
let OBSID=$OBSID+1
|
||||||
cd ..
|
cd ..
|
||||||
done
|
done
|
||||||
|
|
|
@ -0,0 +1,89 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-dl-sort-txmode-all
|
||||||
|
#
|
||||||
|
# XXX This script removes directories in data/ !!! XXX
|
||||||
|
#
|
||||||
|
# Training of all waterfalls. Used for modes that have few samples.
|
||||||
|
#
|
||||||
|
# Populates the data/ directory from the download/dir.
|
||||||
|
# Does it just for a specific transmitter mode (encoding)
|
||||||
|
#
|
||||||
|
# Available encodings:
|
||||||
|
# 4FSK AFSK_TUBiX10 AFSK AHRPT AM APT ASK BPSK_PMT-A3 BPSK CERTO CW DBPSK DOKA
|
||||||
|
# DPSK DQPSK DSTAR DUV DVB-S2 FFSK FMN FM FSK_AX.25_G3RUH FSK_AX.100_Mode_5
|
||||||
|
# FSK_AX.100_Mode_6 FSK GFSK_Rktr GFSK GFSK/BPSK GMSK_USP GMSK HRPT LRPT LSB
|
||||||
|
# LoRa MFSK MSK_AX.100_Mode_5 MSK_AX.100_Mode_6 MSK OFDM OQPSK PSK31 PSK63 PSK
|
||||||
|
# QPSK31 QPSK63 QPSK SSTV USB WSJT
|
||||||
|
#
|
||||||
|
# Encoding list generator:
|
||||||
|
# for i in `curl --silent https://db.satnogs.org/api/modes/ | jq '.[] | .name' | sort -V | sed -e 's/"//g' -e 's/ /_/g' -e 's/\//_/g'` ; do echo -n "$i " ; done ; echo
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-dl-sort-txmode-all [Minimum Observation ID] [Maximum Observation ID]
|
||||||
|
# Example:
|
||||||
|
# wut-dl-sort-txmode-all 1467000 1470000
|
||||||
|
# For December, 2019 Example:
|
||||||
|
# wut-dl-sort-txmode-all 1292461 1470525
|
||||||
|
# wut-dl-sort-txmode-all 1292434 1470525
|
||||||
|
#
|
||||||
|
# * Takes the files in the download/ dir.
|
||||||
|
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
||||||
|
# * Hard link it in the appropriate data/ directory.
|
||||||
|
# * File is randomly copied to either data/train or data/val directory.
|
||||||
|
#
|
||||||
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
OBSENC="ALL"
|
||||||
|
OBSIDMIN="$1"
|
||||||
|
OBSIDMAX="$2"
|
||||||
|
OBSID=$OBSIDMIN
|
||||||
|
DATADIR="/srv/satnogs/data/txmodes/$OBSENC"
|
||||||
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
|
mkdir -p $DATADIR
|
||||||
|
cd $DATADIR || exit
|
||||||
|
|
||||||
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
|
#echo "Downloading Waterfalls"
|
||||||
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
|
# XXX remove data/train and data/val directories XXX
|
||||||
|
echo "Removing subdirectories"
|
||||||
|
rm -rf train/ val/
|
||||||
|
# Create new empty dirs
|
||||||
|
mkdir -p train/good/ train/bad/ train/failed/
|
||||||
|
mkdir -p val/good/ val/bad/ val/failed/
|
||||||
|
|
||||||
|
# Then parse each file and link appropriately
|
||||||
|
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
||||||
|
cd $DOWNDIR || exit
|
||||||
|
|
||||||
|
while [ $OBSID -lt $OBSIDMAX ]
|
||||||
|
do cd $OBSID
|
||||||
|
VET=`cat $OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
|
ENC=`cat $OBSID.json | jq --compact-output '.[0] | {transmitter_mode}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
|
# Do all of them
|
||||||
|
if [ "$OBSENC" = "$OBSENC" ] ; then
|
||||||
|
RAND_DIR=`echo $((0 + RANDOM % 2))`
|
||||||
|
if [ $RAND_DIR = 1 ] ; then
|
||||||
|
CLASS_DIR="train"
|
||||||
|
else
|
||||||
|
CLASS_DIR="val"
|
||||||
|
fi
|
||||||
|
case "$VET" in
|
||||||
|
bad) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
good) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
failed) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
null) echo "null, not copying"
|
||||||
|
;;
|
||||||
|
unknown) echo "unknown, not copying"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
let OBSID=$OBSID+1
|
||||||
|
cd ..
|
||||||
|
done
|
||||||
|
|
|
@ -8,6 +8,8 @@
|
||||||
# Example:
|
# Example:
|
||||||
# wut-files
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
echo
|
echo
|
||||||
DF=`df -h download/`
|
DF=`df -h download/`
|
||||||
echo "$DF"
|
echo "$DF"
|
|
@ -8,6 +8,8 @@
|
||||||
# Example:
|
# Example:
|
||||||
# wut-files
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
TRAIN=`find data/train -type f | wc -l`
|
TRAIN=`find data/train -type f | wc -l`
|
||||||
echo
|
echo
|
||||||
echo "Training Files: $TRAIN"
|
echo "Training Files: $TRAIN"
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-files
|
||||||
|
#
|
||||||
|
# Tells you about what files you have in downloads/ and data/
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-files
|
||||||
|
# Example:
|
||||||
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs/data/txmodes/ALL
|
||||||
|
|
||||||
|
ALL=`find . -type f | wc -l`
|
||||||
|
echo
|
||||||
|
echo "All Files: $ALL"
|
||||||
|
TRAIN=`find train -type f | wc -l`
|
||||||
|
echo
|
||||||
|
echo "Training Files: $TRAIN"
|
||||||
|
VAL=`find val -type f | wc -l`
|
||||||
|
echo "Validation Files: $VAL"
|
||||||
|
TRAINGOOD=`find train/good/ -name '*.png' | wc -l`
|
||||||
|
echo
|
||||||
|
echo "Training Good: $TRAINGOOD"
|
||||||
|
TRAINBAD=`find train/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Bad: $TRAINBAD"
|
||||||
|
TRAINFAILED=`find train/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Failed: $TRAINFAILED"
|
||||||
|
echo
|
||||||
|
VALGOOD=`find val/good/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Good: $VALGOOD"
|
||||||
|
VALBAD=`find val/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Bad: $VALBAD"
|
||||||
|
VALFAILED=`find val/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Failed: $VALFAILED"
|
||||||
|
echo
|
||||||
|
|
|
@ -0,0 +1,74 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-ia-sha1 --- Verify downloaded files checksums
|
||||||
|
#
|
||||||
|
# XXX uses both ET and xml.parsers.expat
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
from xml.parsers.expat import ParserCreate, ExpatError, errors
|
||||||
|
from pathlib import Path
|
||||||
|
import hashlib
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
dl_dir=Path('/srv/dl')
|
||||||
|
|
||||||
|
def convertxml(xmlfile, xml_attribs=True):
|
||||||
|
with open(xmlfile, "rb") as f:
|
||||||
|
d = xmltodict.parse(f, xml_attribs=xml_attribs, process_namespaces=False)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='sha1 check Internet Archive downloads')
|
||||||
|
parser.add_argument('observations',
|
||||||
|
type=str,
|
||||||
|
help='Observation set. Example: 006050001-006060000')
|
||||||
|
args = parser.parse_args()
|
||||||
|
obs_set = 'satnogs-observations-' + args.observations
|
||||||
|
obs_dir = Path(dl_dir, obs_set)
|
||||||
|
filename_xml = obs_set + '_files.xml'
|
||||||
|
print('filename XML:', filename_xml)
|
||||||
|
xmlfile = Path(obs_dir, filename_xml)
|
||||||
|
p = ParserCreate()
|
||||||
|
try:
|
||||||
|
p.ParseFile(open(xmlfile, 'rb'))
|
||||||
|
except:
|
||||||
|
print('No XML file to process')
|
||||||
|
exit()
|
||||||
|
|
||||||
|
return(xmlfile, obs_dir)
|
||||||
|
|
||||||
|
def get_sha1(filename):
|
||||||
|
sha1 = hashlib.sha1()
|
||||||
|
try:
|
||||||
|
with open(filename, 'rb') as f:
|
||||||
|
while True:
|
||||||
|
data = f.read(1048576)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
sha1.update(data)
|
||||||
|
return sha1.hexdigest()
|
||||||
|
|
||||||
|
except:
|
||||||
|
status='EXCEPTION'
|
||||||
|
|
||||||
|
def process_set(xmlfile, obs_dir):
|
||||||
|
root_node = ET.parse(xmlfile).getroot()
|
||||||
|
for tag in root_node.findall('file'):
|
||||||
|
name = tag.get('name')
|
||||||
|
for file_sha1 in tag.iter('sha1'):
|
||||||
|
filename = Path(obs_dir, name)
|
||||||
|
sha1_hash=get_sha1(filename)
|
||||||
|
if sha1_hash == file_sha1.text:
|
||||||
|
print('OK ', end='')
|
||||||
|
else:
|
||||||
|
print('FAIL ', end='')
|
||||||
|
print(name)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
xmlfile, obs_dir = parse_args()
|
||||||
|
process_set(xmlfile, obs_dir)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main();
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-ia-torrents --- Download SatNOGS torrents from the Internet Archive.
|
||||||
|
#
|
||||||
|
# https://archive.org/details/satnogs
|
||||||
|
|
||||||
|
from internetarchive import get_item
|
||||||
|
from internetarchive import get_session
|
||||||
|
from internetarchive import download
|
||||||
|
from internetarchive import search_items
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Download dir
|
||||||
|
obs_dl='/srv/dl'
|
||||||
|
|
||||||
|
s = get_session()
|
||||||
|
s.mount_http_adapter()
|
||||||
|
search_results = s.search_items('satnogs-observations')
|
||||||
|
for i in search_items('identifier:satnogs-observations-*'):
|
||||||
|
obs_id=(i['identifier'])
|
||||||
|
print('Collection', obs_id)
|
||||||
|
download(obs_id, verbose=True, glob_pattern='*.torrent',
|
||||||
|
checksum=True, destdir=obs_dl,
|
||||||
|
retries=4, ignore_errors=True)
|
||||||
|
|
||||||
|
download(obs_id, verbose=True, glob_pattern='*_files.xml',
|
||||||
|
checksum=True, destdir=obs_dl,
|
||||||
|
retries=4, ignore_errors=True)
|
||||||
|
|
||||||
|
time.sleep(3)
|
||||||
|
|
|
@ -0,0 +1,37 @@
|
||||||
|
#!/usr/bin/python3
|
||||||
|
#
|
||||||
|
# wut-img-ck.py
|
||||||
|
#
|
||||||
|
# Validate images.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
# All of download...
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/download/*/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
# Individual training dirs
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/train/good/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/train/bad/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/val/good/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/val/bad/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
|
@ -16,25 +16,25 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
# XXX
|
# XXX
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
#from tensorflow.python.keras.optimizers import Adam
|
#from tensorflow.keras.optimizers import Adam
|
||||||
|
|
||||||
|
|
||||||
# XXX Plot
|
# XXX Plot
|
||||||
from tensorflow.python.keras.utils import plot_model
|
from tensorflow.keras.utils import plot_model
|
||||||
from tensorflow.python.keras.callbacks import ModelCheckpoint
|
from tensorflow.keras.callbacks import ModelCheckpoint
|
||||||
## for visualizing
|
## for visualizing
|
||||||
import matplotlib.pyplot as plt, numpy as np
|
import matplotlib.pyplot as plt, numpy as np
|
||||||
from sklearn.decomposition import PCA
|
from sklearn.decomposition import PCA
|
||||||
|
@ -68,9 +68,9 @@ datagen = ImageDataGenerator(
|
||||||
dtype='float32')
|
dtype='float32')
|
||||||
|
|
||||||
print("datagen.flow")
|
print("datagen.flow")
|
||||||
train_it = datagen.flow_from_directory('data/train/', class_mode='binary')
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
val_it = datagen.flow_from_directory('data/val/', class_mode='binary')
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
test_it = datagen.flow_from_directory('data/test/', class_mode='binary')
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
|
|
||||||
print("train_it.next()")
|
print("train_it.next()")
|
||||||
trainX, trainY = train_it.next()
|
trainX, trainY = train_it.next()
|
|
@ -16,25 +16,25 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
# XXX
|
# XXX
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
#from tensorflow.python.keras.optimizers import Adam
|
#from tensorflow.keras.optimizers import Adam
|
||||||
|
|
||||||
|
|
||||||
# XXX Plot
|
# XXX Plot
|
||||||
from tensorflow.python.keras.utils import plot_model
|
from tensorflow.keras.utils import plot_model
|
||||||
from tensorflow.python.keras.callbacks import ModelCheckpoint
|
from tensorflow.keras.callbacks import ModelCheckpoint
|
||||||
## for visualizing
|
## for visualizing
|
||||||
import matplotlib.pyplot as plt, numpy as np
|
import matplotlib.pyplot as plt, numpy as np
|
||||||
from sklearn.decomposition import PCA
|
from sklearn.decomposition import PCA
|
||||||
|
@ -68,9 +68,9 @@ datagen = ImageDataGenerator(
|
||||||
dtype='float32')
|
dtype='float32')
|
||||||
|
|
||||||
print("datagen.flow")
|
print("datagen.flow")
|
||||||
train_it = datagen.flow_from_directory('data/train/', class_mode='binary')
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
val_it = datagen.flow_from_directory('data/val/', class_mode='binary')
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
test_it = datagen.flow_from_directory('data/test/', class_mode='binary')
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -20,17 +20,17 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
model = load_model('data/wut.h5')
|
model = load_model('/srv/satnogs/data/wut.h5')
|
||||||
img_width=256
|
img_width=256
|
||||||
img_height=256
|
img_height=256
|
||||||
model = Sequential()
|
model = Sequential()
|
|
@ -19,20 +19,20 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
datagen = ImageDataGenerator()
|
datagen = ImageDataGenerator()
|
||||||
train_it = datagen.flow_from_directory('data/train/', class_mode='binary')
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
val_it = datagen.flow_from_directory('data/val/', class_mode='binary')
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
test_it = datagen.flow_from_directory('data/test/', class_mode='binary')
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
batchX, batchy = train_it.next()
|
batchX, batchy = train_it.next()
|
||||||
print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
|
print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
|
||||||
img_width=256
|
img_width=256
|
|
@ -7,7 +7,7 @@
|
||||||
# Download Observation: JSON. Not waterfall, audio, or data files.
|
# Download Observation: JSON. Not waterfall, audio, or data files.
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
cd $DOWNDIR || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# wut-review-staging
|
# wut-review-staging
|
||||||
# Go through all the images in data/staging and review them.
|
# Go through all the images in data/staging and review them.
|
||||||
cd data/staging || exit
|
cd /srv/satnogs/data/staging || exit
|
||||||
for i in *.png
|
for i in *.png
|
||||||
do echo $i
|
do echo $i
|
||||||
rm ../test/unvetted/*.png
|
rm ../test/unvetted/*.png
|
|
@ -22,7 +22,7 @@ KEEP=100
|
||||||
# this is so bad no one should ever run it again
|
# this is so bad no one should ever run it again
|
||||||
#exit 0
|
#exit 0
|
||||||
# XXX Delete data in this directory! XXX
|
# XXX Delete data in this directory! XXX
|
||||||
cd data/test/unvetted/ || exit
|
cd /srv/satnogs/data/test/unvetted/ || exit
|
||||||
|
|
||||||
TOTALFILES=`ls waterfall_*.png | wc -l`
|
TOTALFILES=`ls waterfall_*.png | wc -l`
|
||||||
for wf in waterfall_*.png
|
for wf in waterfall_*.png
|
|
@ -15,17 +15,17 @@ import datetime
|
||||||
import tensorflow as tf
|
import tensorflow as tf
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
os.environ["TF_CONFIG"] = json.dumps({
|
os.environ["TF_CONFIG"] = json.dumps({
|
||||||
"cluster": {
|
"cluster": {
|
||||||
"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
|
@ -7,7 +7,7 @@
|
||||||
# Download Observation: JSON and waterfall. Not audio or data files.
|
# Download Observation: JSON and waterfall. Not audio or data files.
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
cd $DOWNDIR || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
|
@ -9,9 +9,25 @@
|
||||||
#
|
#
|
||||||
# The last observation to start in 2019 was 1470525
|
# The last observation to start in 2019 was 1470525
|
||||||
# The last observation to start in 2019-11 was 1292461
|
# The last observation to start in 2019-11 was 1292461
|
||||||
|
#
|
||||||
|
# APPROXIMATE:
|
||||||
|
# Observations 2015: 1-86
|
||||||
|
# Observations 2016: 87-613. Many in 15,000 range too
|
||||||
|
# Observations 2017: 614-55551
|
||||||
|
# Observations 2018: 55551-388962
|
||||||
|
# Observations 2019: 388963-1470939
|
||||||
|
# Observations 2020: 1470940-3394851
|
||||||
|
# Observations 2021: 3394852-5231193
|
||||||
|
# Observations 2022-01 2022-04: 5231194-5712616
|
||||||
|
# Observations 2022-05 5712617-6021303
|
||||||
|
# Observations 2022-06 6021304-6154227
|
||||||
|
# Observations 2022-07 6154228-6283338
|
||||||
|
#
|
||||||
# NOTE! Observations are not in numerical order by chronology.
|
# NOTE! Observations are not in numerical order by chronology.
|
||||||
# It looks like it is ordered by scheduling, so an older observation can have
|
# It looks like it is ordered by scheduling, so an older observation can have
|
||||||
# a higher observation ID.
|
# a higher observation ID. So the above list is rough, not exact.
|
||||||
|
# Also, there are exceptions, such as observations with IDs far higher than
|
||||||
|
# others that year.
|
||||||
#
|
#
|
||||||
# So to get mostly all of the observations in December, 2019, run:
|
# So to get mostly all of the observations in December, 2019, run:
|
||||||
# wut-water-range 1292461 1470525
|
# wut-water-range 1292461 1470525
|
||||||
|
@ -21,7 +37,7 @@
|
||||||
# XXX Should check input is sane...
|
# XXX Should check input is sane...
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
OBSIDMIN="$1"
|
OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
|
@ -0,0 +1,26 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-worker
|
||||||
|
#
|
||||||
|
# Starts worker client.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-worker
|
||||||
|
# Example:
|
||||||
|
# wut-worker
|
||||||
|
#
|
||||||
|
# Note:
|
||||||
|
# Each node needs a unique index number.
|
||||||
|
#
|
||||||
|
# NOTE!
|
||||||
|
# This generates the node number based off the hostname.
|
||||||
|
# The hosts are rs-ml1 through rs-ml10. The index starts at zero,
|
||||||
|
# so the index is hostname minus one (without alpha).
|
||||||
|
|
||||||
|
HOSTNUM=`hostname | sed -e 's/rs-ml//g'`
|
||||||
|
let HOSTNUM=$HOSTNUM-1
|
||||||
|
|
||||||
|
export TF_CONFIG='{"cluster": {"worker": [ "rs-ml1:23009", "rs-ml2:23009", "rs-ml3:23009", "rs-ml4:23009", "rs-ml5:23009", "rs-ml6:23009", "rs-ml7:23009", "rs-ml8:23009", "rs-ml9:23009", "rs-ml10:23009"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
|
||||||
|
echo $TF_CONFIG
|
||||||
|
wut-worker.py
|
||||||
|
|
|
@ -13,14 +13,15 @@
|
||||||
#
|
#
|
||||||
# NOTE!
|
# NOTE!
|
||||||
# This generates the node number based off the hostname.
|
# This generates the node number based off the hostname.
|
||||||
# The hosts are ml0 through ml5.
|
# The hosts are rs-ml0 through rs-ml10.
|
||||||
|
|
||||||
HOSTNUM=`hostname | sed -e 's/ml//g'`
|
HOSTNUM=`hostname | sed -e 's/rs-ml//g'`
|
||||||
|
|
||||||
#export TF_CONFIG='{"cluster": {"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
#export TF_CONFIG='{"cluster": {"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
export TF_CONFIG='{"cluster": {"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}}'
|
#export TF_CONFIG='{"cluster": {"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}}'
|
||||||
|
export TF_CONFIG='{"cluster": {"worker": [ "rs-ml1:23009", "rs-ml2:23009", "rs-ml3:23009", "rs-ml4:23009", "rs-ml5:23009", "rs-ml6:23009", "rs-ml7:23009", "rs-ml8:23009", "rs-ml9:23009", "rs-ml10:23009"]}}'
|
||||||
#export TF_CONFIG='{"cluster": {"chief": [ "ml0-int:2222" ], "worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
#export TF_CONFIG='{"cluster": {"chief": [ "ml0-int:2222" ], "worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
|
||||||
echo $TF_CONFIG
|
echo $TF_CONFIG
|
||||||
python3 wut-worker-mas.py
|
wut-worker-mas.py
|
||||||
|
|
|
@ -15,17 +15,17 @@ import datetime
|
||||||
import tensorflow as tf
|
import tensorflow as tf
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
|
|
||||||
tf.keras.backend.clear_session()
|
tf.keras.backend.clear_session()
|
||||||
tf.config.optimizer.set_jit(True)
|
tf.config.optimizer.set_jit(True)
|
||||||
|
@ -52,8 +52,8 @@ NUM_WORKERS = 6
|
||||||
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
||||||
|
|
||||||
# XXX
|
# XXX
|
||||||
POSITIVE_DIRECTORY = '/home/jebba/devel/spacecruft/satnogs-wut/data/pos'
|
POSITIVE_DIRECTORY = '/srv/satnogs/data/pos'
|
||||||
pos_dir = '/home/jebba/devel/spacecruft/satnogs-wut/data/posdir'
|
pos_dir = '/srv/satnogs/data/posdir'
|
||||||
|
|
||||||
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
||||||
tf.distribute.experimental.CollectiveCommunication.RING)
|
tf.distribute.experimental.CollectiveCommunication.RING)
|
||||||
|
@ -92,7 +92,7 @@ def process_image(image_bytes, label):
|
||||||
|
|
||||||
AUTOTUNE = tf.data.experimental.AUTOTUNE
|
AUTOTUNE = tf.data.experimental.AUTOTUNE
|
||||||
NUM_TOTAL_IMAGES=100
|
NUM_TOTAL_IMAGES=100
|
||||||
data_root = "/home/jebba/devel/spacecruft/satnogs-wut/data"
|
data_root = "/srv/satnogs/data"
|
||||||
profile_dir = os.path.join(data_root, "profiles")
|
profile_dir = os.path.join(data_root, "profiles")
|
||||||
dataset = tf.data.Dataset.list_files(data_root)
|
dataset = tf.data.Dataset.list_files(data_root)
|
||||||
dataset = dataset.shuffle(NUM_TOTAL_IMAGES)
|
dataset = dataset.shuffle(NUM_TOTAL_IMAGES)
|
||||||
|
@ -174,8 +174,8 @@ def handle_batching():
|
||||||
yield concat(batch)
|
yield concat(batch)
|
||||||
batch.reset()
|
batch.reset()
|
||||||
|
|
||||||
train_dir = os.path.join('data/', 'train')
|
train_dir = os.path.join('/srv/satnogs/data/', 'train')
|
||||||
val_dir = os.path.join('data/', 'val')
|
val_dir = os.path.join('/srv/satnogs/data/', 'val')
|
||||||
train_good_dir = os.path.join(train_dir, 'good')
|
train_good_dir = os.path.join(train_dir, 'good')
|
||||||
train_bad_dir = os.path.join(train_dir, 'bad')
|
train_bad_dir = os.path.join(train_dir, 'bad')
|
||||||
val_good_dir = os.path.join(val_dir, 'good')
|
val_good_dir = os.path.join(val_dir, 'good')
|
|
@ -1,10 +1,8 @@
|
||||||
#!/usr/bin/env python
|
#!/usr/bin/env python3
|
||||||
# coding: utf-8
|
#
|
||||||
|
# wut-worker.py
|
||||||
# In[ ]:
|
#
|
||||||
|
# wut --- What U Think? SatNOGS Observation AI, training application cluster edition.
|
||||||
|
|
||||||
# wut-train-cluster --- What U Think? SatNOGS Observation AI, training application cluster edition.
|
|
||||||
#
|
#
|
||||||
# https://spacecruft.org/spacecruft/satnogs-wut
|
# https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
#
|
#
|
||||||
|
@ -12,41 +10,27 @@
|
||||||
# GPLv3+
|
# GPLv3+
|
||||||
# Built using Jupyter, Tensorflow, Keras
|
# Built using Jupyter, Tensorflow, Keras
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import simplejson as json
|
import simplejson as json
|
||||||
import datetime
|
import datetime
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
import tensorflow as tf
|
import tensorflow as tf
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
|
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
get_ipython().run_line_magic('matplotlib', 'inline')
|
get_ipython().run_line_magic('matplotlib', 'inline')
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
import seaborn as sns
|
import seaborn as sns
|
||||||
|
@ -55,28 +39,16 @@ import seaborn as sns
|
||||||
#import ipywidgets as widgets
|
#import ipywidgets as widgets
|
||||||
#from IPython.display import display, Image
|
#from IPython.display import display, Image
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
print('tf {}'.format(tf.__version__))
|
print('tf {}'.format(tf.__version__))
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
os.environ["TF_CONFIG"] = json.dumps({
|
os.environ["TF_CONFIG"] = json.dumps({
|
||||||
"cluster": {
|
"cluster": {
|
||||||
"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
"worker": ["ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
||||||
},
|
},
|
||||||
"task": {"type": "worker", "index": 0 },
|
"task": {"type": "worker", "index": 0 },
|
||||||
"num_workers": 6
|
"num_workers": 5
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
IMG_HEIGHT = 416
|
IMG_HEIGHT = 416
|
||||||
IMG_WIDTH= 804
|
IMG_WIDTH= 804
|
||||||
batch_size = 32
|
batch_size = 32
|
||||||
|
@ -88,36 +60,19 @@ epochs = 1
|
||||||
#batch_size = 128
|
#batch_size = 128
|
||||||
#epochs = 6
|
#epochs = 6
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
tf.keras.backend.clear_session()
|
tf.keras.backend.clear_session()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
options = tf.data.Options()
|
options = tf.data.Options()
|
||||||
#options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
|
#options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
|
||||||
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.DATA
|
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.DATA
|
||||||
# XXX
|
# XXX
|
||||||
#dataset = dataset.with_options(options)
|
#dataset = dataset.with_options(options)
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
||||||
tf.distribute.experimental.CollectiveCommunication.RING)
|
tf.distribute.experimental.CollectiveCommunication.RING)
|
||||||
|
|
||||||
#mirrored_strategy = tf.distribute.MirroredStrategy(
|
#mirrored_strategy = tf.distribute.MirroredStrategy(
|
||||||
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
|
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
root_data_dir = ('/srv/satnogs')
|
root_data_dir = ('/srv/satnogs')
|
||||||
train_dir = os.path.join(root_data_dir, 'data/', 'train')
|
train_dir = os.path.join(root_data_dir, 'data/', 'train')
|
||||||
val_dir = os.path.join(root_data_dir,'data/', 'val')
|
val_dir = os.path.join(root_data_dir,'data/', 'val')
|
||||||
|
@ -132,10 +87,6 @@ num_val_bad = len(os.listdir(val_bad_dir))
|
||||||
total_train = num_train_good + num_train_bad
|
total_train = num_train_good + num_train_bad
|
||||||
total_val = num_val_good + num_val_bad
|
total_val = num_val_good + num_val_bad
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
print('total training good images:', num_train_good)
|
print('total training good images:', num_train_good)
|
||||||
print('total training bad images:', num_train_bad)
|
print('total training bad images:', num_train_bad)
|
||||||
print("--")
|
print("--")
|
||||||
|
@ -144,11 +95,6 @@ print('total validation good images:', num_val_good)
|
||||||
print('total validation bad images:', num_val_bad)
|
print('total validation bad images:', num_val_bad)
|
||||||
print("--")
|
print("--")
|
||||||
print("Total validation images:", total_val)
|
print("Total validation images:", total_val)
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
print("--")
|
print("--")
|
||||||
print("Reduce training and validation set when testing")
|
print("Reduce training and validation set when testing")
|
||||||
total_train = 100
|
total_train = 100
|
||||||
|
@ -156,10 +102,6 @@ total_val = 100
|
||||||
print("Reduced training images:", total_train)
|
print("Reduced training images:", total_train)
|
||||||
print("Reduced validation images:", total_val)
|
print("Reduced validation images:", total_val)
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
train_image_generator = ImageDataGenerator(
|
train_image_generator = ImageDataGenerator(
|
||||||
rescale=1./255
|
rescale=1./255
|
||||||
)
|
)
|
||||||
|
@ -177,18 +119,8 @@ val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,
|
||||||
directory=val_dir,
|
directory=val_dir,
|
||||||
target_size=(IMG_HEIGHT, IMG_WIDTH),
|
target_size=(IMG_HEIGHT, IMG_WIDTH),
|
||||||
class_mode='binary')
|
class_mode='binary')
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#train_dist_dataset = strategy.experimental_distribute_dataset()
|
#train_dist_dataset = strategy.experimental_distribute_dataset()
|
||||||
#val_dist_dataset = strategy.experimental_distribute_dataset()
|
#val_dist_dataset = strategy.experimental_distribute_dataset()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
sample_train_images, _ = next(train_data_gen)
|
sample_train_images, _ = next(train_data_gen)
|
||||||
sample_val_images, _ = next(val_data_gen)
|
sample_val_images, _ = next(val_data_gen)
|
||||||
# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.
|
# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.
|
||||||
|
@ -201,31 +133,18 @@ def plotImages(images_arr):
|
||||||
plt.tight_layout()
|
plt.tight_layout()
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
plotImages(sample_train_images[0:3])
|
#plotImages(sample_train_images[0:3])
|
||||||
plotImages(sample_val_images[0:3])
|
#plotImages(sample_val_images[0:3])
|
||||||
|
#get_ipython().run_line_magic('load_ext', 'tensorboard')
|
||||||
|
#get_ipython().system('rm -rf ./clusterlogs/')
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
get_ipython().run_line_magic('load_ext', 'tensorboard')
|
|
||||||
get_ipython().system('rm -rf ./clusterlogs/')
|
|
||||||
#log_dir="clusterlogs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
#log_dir="clusterlogs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
log_dir="clusterlogs"
|
#log_dir="clusterlogs"
|
||||||
#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
|
#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
|
||||||
tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)
|
#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)
|
||||||
#%tensorboard --logdir clusterlogs --port 6006
|
#%tensorboard --logdir clusterlogs --port 6006
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
strategy.num_replicas_in_sync
|
strategy.num_replicas_in_sync
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
## Compute global batch size using number of replicas.
|
## Compute global batch size using number of replicas.
|
||||||
#GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
#GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
||||||
BATCH_SIZE_PER_REPLICA = 8
|
BATCH_SIZE_PER_REPLICA = 8
|
||||||
|
@ -244,16 +163,8 @@ print("total_val // batch_size", total_val // batch_size)
|
||||||
#dataset = dataset.batch(global_batch_size)
|
#dataset = dataset.batch(global_batch_size)
|
||||||
#LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}
|
#LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
|
#learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
def get_uncompiled_model():
|
def get_uncompiled_model():
|
||||||
model = Sequential([
|
model = Sequential([
|
||||||
Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
|
Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
|
||||||
|
@ -268,16 +179,7 @@ def get_uncompiled_model():
|
||||||
])
|
])
|
||||||
return model
|
return model
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#get_uncompiled_model()
|
#get_uncompiled_model()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
def get_compiled_model():
|
def get_compiled_model():
|
||||||
model = get_uncompiled_model()
|
model = get_uncompiled_model()
|
||||||
model.compile(optimizer='adam',
|
model.compile(optimizer='adam',
|
||||||
|
@ -285,25 +187,12 @@ def get_compiled_model():
|
||||||
metrics=['accuracy'])
|
metrics=['accuracy'])
|
||||||
return model
|
return model
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
# Create a checkpoint directory to store the checkpoints.
|
# Create a checkpoint directory to store the checkpoints.
|
||||||
#checkpoint_dir = './training_checkpoints'
|
#checkpoint_dir = './training_checkpoints'
|
||||||
#checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
|
#checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='tmp/keras-ckpt')]
|
#callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='tmp/keras-ckpt')]
|
||||||
#callbacks=[tensorboard_callback,callbacks]
|
#callbacks=[tensorboard_callback,callbacks]
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#def get_fit_model():
|
#def get_fit_model():
|
||||||
# model = get_compiled_model()
|
# model = get_compiled_model()
|
||||||
# model.fit(
|
# model.fit(
|
||||||
|
@ -316,10 +205,6 @@ def get_compiled_model():
|
||||||
# )
|
# )
|
||||||
#return model
|
#return model
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
with strategy.scope():
|
with strategy.scope():
|
||||||
model = get_compiled_model()
|
model = get_compiled_model()
|
||||||
history = model.fit(
|
history = model.fit(
|
||||||
|
@ -331,16 +216,8 @@ with strategy.scope():
|
||||||
verbose=2
|
verbose=2
|
||||||
).batch(global_batch_size)
|
).batch(global_batch_size)
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#model.summary()
|
#model.summary()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
print("TRAINING info")
|
print("TRAINING info")
|
||||||
print(train_dir)
|
print(train_dir)
|
||||||
print(train_good_dir)
|
print(train_good_dir)
|
||||||
|
@ -351,40 +228,13 @@ print(train_data_gen)
|
||||||
#print(history)
|
#print(history)
|
||||||
#model.to_json()
|
#model.to_json()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
#history = model.fit(X, y, batch_size=32, epochs=40, validation_split=0.1)
|
#history = model.fit(X, y, batch_size=32, epochs=40, validation_split=0.1)
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
model.save('data/models/FOO/wut-train-cluster2.tf')
|
model.save('data/models/FOO/wut-train-cluster2.tf')
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
model.save('data/models/FOO/wut-train-cluster2.h5')
|
model.save('data/models/FOO/wut-train-cluster2.h5')
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
model.save_weights('data/models/FOO/wut-weights-train-cluster2.tf')
|
model.save_weights('data/models/FOO/wut-weights-train-cluster2.tf')
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
model.save_weights('data/models/FOO/wut-weights-train-cluster2.h5')
|
model.save_weights('data/models/FOO/wut-weights-train-cluster2.h5')
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
acc = history.history['accuracy']
|
acc = history.history['accuracy']
|
||||||
val_acc = history.history['val_accuracy']
|
val_acc = history.history['val_accuracy']
|
||||||
loss = history.history['loss']
|
loss = history.history['loss']
|
||||||
|
@ -403,9 +253,3 @@ plt.legend(loc='upper right')
|
||||||
plt.title('Training and Validation Loss')
|
plt.title('Training and Validation Loss')
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
# In[ ]:
|
|
||||||
|
|
||||||
|
|
||||||
# The End
|
|
||||||
|
|
26
wut-worker
26
wut-worker
|
@ -1,26 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# wut-worker
|
|
||||||
#
|
|
||||||
# Starts worker client.
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# wut-worker
|
|
||||||
# Example:
|
|
||||||
# wut-worker
|
|
||||||
#
|
|
||||||
# Note:
|
|
||||||
# Each node needs a unique index number.
|
|
||||||
#
|
|
||||||
# NOTE!
|
|
||||||
# This generates the node number based off the hostname.
|
|
||||||
# The hosts are ml1 through ml5. The index starts at zero,
|
|
||||||
# so the index is hostname minus one (without alpha).
|
|
||||||
|
|
||||||
HOSTNUM=`hostname | sed -e 's/ml//g'`
|
|
||||||
#let HOSTNUM=$HOSTNUM-1
|
|
||||||
|
|
||||||
export TF_CONFIG='{"cluster": {"worker": [ "10.100.100.130:2222", "ml1:2222", "ml2:2222", "ml3:2222", "ml4:2222", "ml5:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
|
||||||
|
|
||||||
echo $TF_CONFIG
|
|
||||||
python3 wut-worker.py
|
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# wut-worker-train-cluster-fn
|
|
||||||
#
|
|
||||||
# Starts worker client.
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# wut-worker-train-cluster-fn
|
|
||||||
# Example:
|
|
||||||
# wut-worker-train-cluster-fn
|
|
||||||
#
|
|
||||||
# Note:
|
|
||||||
# Each node needs a unique index number.
|
|
||||||
#
|
|
||||||
# NOTE!
|
|
||||||
# This generates the node number based off the hostname.
|
|
||||||
# The hosts are ml1 through ml5. The index starts at zero,
|
|
||||||
# so the index is hostname minus one (without alpha).
|
|
||||||
|
|
||||||
HOSTNUM=`hostname | sed -e 's/ml//g'`
|
|
||||||
|
|
||||||
export TF_CONFIG='{"cluster": {"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
|
||||||
|
|
||||||
echo $TF_CONFIG
|
|
||||||
python3 wut-train-cluster-fn.py
|
|
||||||
|
|
|
@ -1,79 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
#
|
|
||||||
# wut-worker.py --- Runs on worker nodes.
|
|
||||||
#
|
|
||||||
# Start with wut-worker shell script to set correct
|
|
||||||
# environmental variables.
|
|
||||||
|
|
||||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
||||||
import simplejson as json
|
|
||||||
import os
|
|
||||||
import numpy as np
|
|
||||||
import tensorflow as tf
|
|
||||||
import tensorflow.python.keras
|
|
||||||
from tensorflow.python.keras import Sequential
|
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
|
||||||
from tensorflow.python.keras import optimizers
|
|
||||||
from tensorflow.python.keras.preprocessing import image
|
|
||||||
from tensorflow.python.keras.models import load_model
|
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
|
||||||
from tensorflow.python.keras.models import Model
|
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
|
||||||
|
|
||||||
#batch_size = 32
|
|
||||||
#epochs = 4
|
|
||||||
IMG_HEIGHT = 416
|
|
||||||
IMG_WIDTH= 804
|
|
||||||
|
|
||||||
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
|
|
||||||
|
|
||||||
|
|
||||||
def get_uncompiled_model():
|
|
||||||
model = Sequential([
|
|
||||||
Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
|
|
||||||
MaxPooling2D(),
|
|
||||||
Conv2D(32, 3, padding='same', activation='relu'),
|
|
||||||
MaxPooling2D(),
|
|
||||||
Conv2D(64, 3, padding='same', activation='relu'),
|
|
||||||
MaxPooling2D(),
|
|
||||||
Flatten(),
|
|
||||||
Dense(512, activation='relu'),
|
|
||||||
Dense(1, activation='sigmoid')
|
|
||||||
])
|
|
||||||
return model
|
|
||||||
|
|
||||||
def get_compiled_model():
|
|
||||||
model = get_uncompiled_model()
|
|
||||||
model.compile(optimizer='adam',
|
|
||||||
loss='binary_crossentropy',
|
|
||||||
metrics=['accuracy'])
|
|
||||||
return model
|
|
||||||
|
|
||||||
def get_fit_model():
|
|
||||||
model = get_compiled_model()
|
|
||||||
model.fit(
|
|
||||||
model )
|
|
||||||
return model
|
|
||||||
|
|
||||||
#def get_fit_model():
|
|
||||||
# model = get_compiled_model()
|
|
||||||
# model.fit(
|
|
||||||
# train_data_gen,
|
|
||||||
# steps_per_epoch=total_train // batch_size,
|
|
||||||
# epochs=epochs,
|
|
||||||
# validation_data=val_data_gen,
|
|
||||||
# validation_steps=total_val // batch_size,
|
|
||||||
# verbose=2
|
|
||||||
# )
|
|
||||||
# return model
|
|
||||||
|
|
||||||
with strategy.scope():
|
|
||||||
get_uncompiled_model()
|
|
||||||
get_compiled_model()
|
|
||||||
get_fit_model()
|
|
||||||
|
|
Loading…
Reference in New Issue