Compare commits
249 Commits
Author | SHA1 | Date |
---|---|---|
![]() |
7195382398 | |
![]() |
22fbe4073a | |
![]() |
ac2ae3f1cf | |
![]() |
fb236fcf30 | |
![]() |
8aee0ac624 | |
![]() |
8dd443ab30 | |
![]() |
33adccb2cb | |
![]() |
ec25b09b24 | |
![]() |
3068a39e3a | |
![]() |
827b950a14 | |
![]() |
1fbad10405 | |
![]() |
323ceda7eb | |
![]() |
b07a53458e | |
![]() |
8feeb6c896 | |
![]() |
6be11cb7aa | |
![]() |
3f6434f6dd | |
![]() |
73fbdf43da | |
![]() |
20d96f0a61 | |
![]() |
fa3d0b0284 | |
![]() |
2c86557fcb | |
![]() |
35dc603832 | |
![]() |
16956df5ca | |
![]() |
a094fea6bf | |
![]() |
8a5f8fe070 | |
![]() |
5e987198bc | |
![]() |
b6ac03590a | |
![]() |
270178d027 | |
![]() |
1517670e7c | |
|
4d175ce254 | |
|
e63c52299b | |
![]() |
2d7f366ecc | |
![]() |
16226ae82e | |
![]() |
a3bde31b09 | |
![]() |
c960a49b3f | |
![]() |
75a111c871 | |
![]() |
452d8ab0c9 | |
![]() |
b58edb583e | |
![]() |
6907baeb55 | |
![]() |
324a9b8d02 | |
![]() |
a9343fe66a | |
![]() |
067605de08 | |
![]() |
32322b7f15 | |
![]() |
94fd593865 | |
![]() |
b9600fab49 | |
![]() |
b07f9d4e0f | |
![]() |
7d4e3d445e | |
![]() |
ebf16d1893 | |
![]() |
9c6ab722e5 | |
![]() |
8c7b80083b | |
![]() |
34feced97b | |
![]() |
9fdf801eb7 | |
![]() |
9c26f46464 | |
![]() |
b70aadee1a | |
![]() |
6cdb5a304e | |
![]() |
8cdf0970c9 | |
![]() |
c9c3398e9f | |
![]() |
65037f21d1 | |
![]() |
a1275b77f9 | |
![]() |
87234b88c6 | |
![]() |
b7c1712853 | |
![]() |
b52c07e8ed | |
![]() |
cb7150826f | |
![]() |
0415b186a7 | |
![]() |
653f9d0115 | |
![]() |
14d07b9c6c | |
![]() |
d33cfbdedd | |
![]() |
5a91662237 | |
![]() |
b828430d37 | |
![]() |
18bec38353 | |
![]() |
a2360bd109 | |
![]() |
98f355fb05 | |
![]() |
3eab82b19e | |
![]() |
0d25371eac | |
![]() |
3f3f8d3af2 | |
![]() |
d458c7236d | |
![]() |
cee5d151d9 | |
![]() |
d1edfd4ace | |
![]() |
2cb732eac7 | |
![]() |
1e1b147740 | |
![]() |
8d237bd2ba | |
![]() |
6ea9bce41c | |
![]() |
c3d3612da1 | |
![]() |
d48a041e83 | |
![]() |
0213f93029 | |
![]() |
4d23a231b0 | |
![]() |
d96585e514 | |
![]() |
7eb8676f11 | |
![]() |
298348aad0 | |
![]() |
5d6b0be4ab | |
![]() |
ca0ad42851 | |
![]() |
0bf1519409 | |
![]() |
5c921438b8 | |
![]() |
d8c685e58c | |
![]() |
81528c6044 | |
![]() |
983157846c | |
![]() |
a74bd74a4d | |
![]() |
1c67889a03 | |
![]() |
05715d19b5 | |
![]() |
6f3b4daab4 | |
![]() |
54b53d217a | |
![]() |
6b72e87dfc | |
![]() |
33923b906f | |
![]() |
0865b28c66 | |
![]() |
3442bffdf2 | |
![]() |
f17c2c3672 | |
![]() |
469efb2050 | |
![]() |
30675bc5ca | |
![]() |
2c3d73fd41 | |
![]() |
86584c1f27 | |
![]() |
3eef31079b | |
![]() |
8d302f4b00 | |
![]() |
68c30f255b | |
![]() |
5c083de35b | |
![]() |
d56603c7f9 | |
![]() |
c93fb9df76 | |
![]() |
32b8d2d521 | |
![]() |
d102e6dacf | |
![]() |
777c8b6106 | |
![]() |
3f4dc0a815 | |
![]() |
6486b1d0b3 | |
![]() |
85e047e46c | |
![]() |
bb6b54ce81 | |
![]() |
f06d83db73 | |
![]() |
d6866fa547 | |
![]() |
1d3a0b10f2 | |
![]() |
5c51e9f86d | |
![]() |
3cfb0fd0be | |
![]() |
45c0fcd5df | |
![]() |
4a46d037d3 | |
![]() |
efb7f30792 | |
![]() |
b950af525c | |
![]() |
e78275f0c6 | |
![]() |
a36ab62de9 | |
![]() |
f9796bcf1b | |
![]() |
709b7c53a3 | |
![]() |
18cc36dfa6 | |
![]() |
48173b25d7 | |
![]() |
ef2a5fbf06 | |
![]() |
82d7902d04 | |
![]() |
70954759d2 | |
![]() |
796b101586 | |
![]() |
4806b13411 | |
![]() |
3086fd990e | |
![]() |
5701125da7 | |
![]() |
a4579ae4c5 | |
![]() |
f1f8ab8d9d | |
![]() |
f9bfe3a1b5 | |
![]() |
8ccbaab6ec | |
![]() |
7c9eb3cf32 | |
![]() |
f87a677b19 | |
![]() |
591a02952e | |
![]() |
d05e802255 | |
![]() |
6900a4a772 | |
![]() |
279f24f230 | |
![]() |
8570d3ad01 | |
![]() |
6c1a569246 | |
![]() |
5484aa9ad3 | |
![]() |
a1b06b702d | |
![]() |
daeb0f22d3 | |
![]() |
34141d40fa | |
![]() |
f9358fa6a1 | |
![]() |
dc12e9fc07 | |
![]() |
49a55c7c5f | |
![]() |
4d52b79872 | |
![]() |
17ce39d450 | |
![]() |
42f154215e | |
![]() |
736da8792d | |
![]() |
8d75467c75 | |
![]() |
4bc299901e | |
![]() |
e4d835c9cd | |
![]() |
aeea88756e | |
![]() |
8748eb120c | |
![]() |
c7255b4d14 | |
![]() |
530694c0d8 | |
![]() |
71ce17de48 | |
![]() |
da7e34f74e | |
![]() |
c53c98f34a | |
![]() |
f70d9b8ea7 | |
![]() |
3eba0b50ca | |
![]() |
13fddd00ae | |
![]() |
001f14d338 | |
![]() |
f5f048f13f | |
![]() |
6cccd8b455 | |
![]() |
24974e7a1d | |
![]() |
83041461cd | |
![]() |
4a2dcab55c | |
![]() |
99b554accb | |
![]() |
e8818d795b | |
![]() |
fba2b97f86 | |
![]() |
9ff9a4d848 | |
![]() |
88605153c8 | |
![]() |
053ea95fcb | |
![]() |
049a2c7df9 | |
![]() |
70530adc95 | |
![]() |
2de8d9205d | |
![]() |
bc9b8ef03b | |
![]() |
a81e2f7fbb | |
![]() |
c158e21f3e | |
![]() |
1b8b828877 | |
![]() |
513066488e | |
![]() |
9525e7927a | |
![]() |
c277f36daa | |
![]() |
957f213451 | |
![]() |
566f1eb139 | |
![]() |
7881d9aad2 | |
![]() |
002cc31224 | |
![]() |
b680ae7f4c | |
![]() |
0f41bed748 | |
![]() |
877e7707ce | |
![]() |
7624975707 | |
![]() |
0c3e935963 | |
![]() |
72f7a68a56 | |
![]() |
e0df22c385 | |
|
b6af3f41d8 | |
|
5374fe6679 | |
![]() |
3921c3ead8 | |
![]() |
19b1df04b0 | |
![]() |
7ef37b76cd | |
![]() |
79cf60befb | |
![]() |
417a4580f9 | |
![]() |
eb3831f97d | |
![]() |
9c09213915 | |
![]() |
9fa145ed19 | |
![]() |
b945d4ab06 | |
![]() |
4aa809f80b | |
![]() |
7f1dcf4e62 | |
![]() |
17b2b4f68d | |
![]() |
ec81a82567 | |
![]() |
ac112250d0 | |
![]() |
5409212ffd | |
![]() |
baae0415e7 | |
![]() |
1f98512b86 | |
![]() |
c76bfb61bd | |
![]() |
62b0ac59e5 | |
![]() |
635dd6b7a8 | |
![]() |
d7389dd521 | |
![]() |
ecbc5fe3e8 | |
![]() |
b7bdc2521e | |
![]() |
ced0cdf495 | |
![]() |
f213d4da15 | |
![]() |
58930d85aa | |
![]() |
eaf2785986 | |
![]() |
aaa0091db8 | |
![]() |
ac33fbe4ff | |
![]() |
c6aa78fa0e | |
![]() |
051346789f | |
![]() |
b3273782b4 | |
![]() |
68b2fc2730 | |
![]() |
cc31af1062 |
|
@ -4,3 +4,9 @@ download/
|
||||||
dump*.json
|
dump*.json
|
||||||
preprocess/
|
preprocess/
|
||||||
tmp/
|
tmp/
|
||||||
|
.ipynb_checkpoints/
|
||||||
|
notebooks/logs/
|
||||||
|
.~lock.*#
|
||||||
|
log/
|
||||||
|
notebooks/model.png
|
||||||
|
bin/
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
# Makefile
|
||||||
|
|
||||||
|
prefix = /usr/local
|
||||||
|
bindir = $(prefix)/bin
|
||||||
|
|
||||||
|
all:
|
||||||
|
$(MAKE) -C src
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -fr bin/
|
||||||
|
|
||||||
|
install:
|
||||||
|
@cp -vp bin/* $(bindir)/
|
||||||
|
|
||||||
|
uninstall:
|
||||||
|
@rm -vf \
|
||||||
|
$(bindir)/wut \
|
||||||
|
$(bindir)/wut-aria-active \
|
||||||
|
$(bindir)/wut-aria-add \
|
||||||
|
$(bindir)/wut-aria-daemon \
|
||||||
|
$(bindir)/wut-aria-info \
|
||||||
|
$(bindir)/wut-aria-methods \
|
||||||
|
$(bindir)/wut-aria-shutdown \
|
||||||
|
$(bindir)/wut-aria-stat \
|
||||||
|
$(bindir)/wut-aria-stopped \
|
||||||
|
$(bindir)/wut-aria-waiting \
|
||||||
|
$(bindir)/wut-audio-archive \
|
||||||
|
$(bindir)/wut-audio-sha1 \
|
||||||
|
$(bindir)/wut-compare \
|
||||||
|
$(bindir)/wut-compare-all \
|
||||||
|
$(bindir)/wut-compare-tx \
|
||||||
|
$(bindir)/wut-compare-txmode \
|
||||||
|
$(bindir)/wut-compare-txmode-csv \
|
||||||
|
$(bindir)/wut-dl-sort \
|
||||||
|
$(bindir)/wut-dl-sort-tx \
|
||||||
|
$(bindir)/wut-dl-sort-txmode \
|
||||||
|
$(bindir)/wut-dl-sort-txmode-all \
|
||||||
|
$(bindir)/wut-files \
|
||||||
|
$(bindir)/wut-files-data \
|
||||||
|
$(bindir)/wut-files-data-all \
|
||||||
|
$(bindir)/wut-ia-sha1 \
|
||||||
|
$(bindir)/wut-ia-torrents \
|
||||||
|
$(bindir)/wut-img-ck.py \
|
||||||
|
$(bindir)/wut-ml \
|
||||||
|
$(bindir)/wut-ml-auto \
|
||||||
|
$(bindir)/wut-ml-load \
|
||||||
|
$(bindir)/wut-ml-save \
|
||||||
|
$(bindir)/wut-obs \
|
||||||
|
$(bindir)/wut-ogg2wav \
|
||||||
|
$(bindir)/wut-review-staging \
|
||||||
|
$(bindir)/wut-rm-random \
|
||||||
|
$(bindir)/wut-tf \
|
||||||
|
$(bindir)/wut-tf.py \
|
||||||
|
$(bindir)/wut-water \
|
||||||
|
$(bindir)/wut-water-range \
|
||||||
|
$(bindir)/wut-worker \
|
||||||
|
$(bindir)/wut-worker-mas \
|
||||||
|
$(bindir)/wut-worker-mas.py \
|
||||||
|
$(bindir)/wut-worker.py \
|
||||||
|
|
303
README.md
303
README.md
|
@ -1,58 +1,83 @@
|
||||||
# wut?
|
# wut?
|
||||||
`wut` --- What U Think? SatNOGS Observation AI.
|
`wut` --- What U Think? SatNOGS Observation AI.
|
||||||
|
|
||||||
|
|
||||||
|
Website:
|
||||||
|
* https://wut.spacecruft.org
|
||||||
|
|
||||||
|
|
||||||
# satnogs-wut
|
# satnogs-wut
|
||||||
|
![Image](pics/wut-web.png)
|
||||||
|
|
||||||
The goal of satnogs-wut is to have a script that will take an
|
The goal of satnogs-wut is to have a script that will take an
|
||||||
observation ID and return an answer whether the observation is
|
observation ID and return an answer whether the observation is
|
||||||
"good", "bad", or "failed".
|
"good", "bad", or "failed".
|
||||||
|
|
||||||
## Good Observation
|
## Good Observation
|
||||||
<div>
|
![Image](pics/waterfall-good.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-good.png" width="300"/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
## Bad Observation
|
## Bad Observation
|
||||||
<div>
|
![Image](pics/waterfall-bad.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-bad.png" width="300"/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
## Failed Observation
|
## Failed Observation
|
||||||
<div>
|
![Image](pics/waterfall-failed.png)
|
||||||
<img src="satnogs-wut/media/branch/master/pics/waterfall-failed.png" width="300"/>
|
|
||||||
</div>
|
## wut Web
|
||||||
|
Main site:
|
||||||
|
* https://wut.spacecruft.org/
|
||||||
|
|
||||||
|
Source code:
|
||||||
|
* https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
|
|
||||||
|
Beta (test) site:
|
||||||
|
|
||||||
|
* https://wut-beta.spacecruft.org/
|
||||||
|
|
||||||
|
Alpha (development) site:
|
||||||
|
|
||||||
|
* https://wut-alpha.spacecruft.org/
|
||||||
|
|
||||||
## Observations
|
## Observations
|
||||||
See also:
|
See also:
|
||||||
|
|
||||||
* https://wiki.satnogs.org/Operation
|
* https://wiki.satnogs.org/Operation
|
||||||
* https://wiki.satnogs.org/Rating_Observations
|
* https://wiki.satnogs.org/Observe
|
||||||
* https://wiki.satnogs.org/Taxonomy_of_Observations
|
* https://wiki.satnogs.org/Observations
|
||||||
|
* https://wiki.satnogs.org/Category:RF_Modes
|
||||||
* Sample observation: https://network.satnogs.org/observations/1456893/
|
* Sample observation: https://network.satnogs.org/observations/1456893/
|
||||||
|
|
||||||
# Machine Learning
|
# Machine Learning
|
||||||
The system at present is built upon the following:
|
The system at present is built upon the following:
|
||||||
|
|
||||||
* Debian Buster.
|
* Debian Bookworm (testing/12).
|
||||||
* Tensorflow 2.1 with built-in Keras.
|
* Tensorflow.
|
||||||
* Jupyter Lab.
|
* Jupyter Lab.
|
||||||
|
* Voila.
|
||||||
|
|
||||||
Learning/testing, results are ~~inaccurate~~ getting closer.
|
Learning/testing, results are good.
|
||||||
The main AI/ML development is now being done in Jupyter.
|
The main AI/ML development is being done in Jupyter.
|
||||||
|
|
||||||
# Jupyter
|
# Jupyter
|
||||||
There is a Jupyter Lab Notebook file.
|
There Jupyter Lab Notebook files in the `notebooks/` subdirectory.
|
||||||
This is producing real results at present, but has a long ways to go still...
|
These are producing usable results. Voila is used to convert
|
||||||
|
Jupyter notebooks into websites.
|
||||||
|
|
||||||
|
* `wut.ipynb` --- Machine learning Python script using Tensorflow and Keras in a Jupyter Notebook.
|
||||||
|
* `wut-predict.ipynb` --- Make prediction (rating) of observation from pre-existing model.
|
||||||
|
* `wut-train.ipynb` --- Train models to be using by prediction engine.
|
||||||
|
* `wut-web.ipynb` --- Website: https://wut.spacecruft.org/
|
||||||
|
* `wut-web-beta.ipynb` --- Website: https://wut-beta.spacecruft.org/
|
||||||
|
* `wut-web-alpha.ipynb` --- Website: https://wut-alpha.spacecruft.org/
|
||||||
|
|
||||||
* `wut-ml.ipynb` --- Machine learning Python script using Tensorflow and Keras in a Jupyter Notebook.
|
|
||||||
* `wut-predict.ipynb` --- Make prediction (rating) of observation, using `data/wut.h5`.
|
|
||||||
* `wut-train.ipynb` --- ML Training file saved to `data/wut.h5`.
|
|
||||||
|
|
||||||
# wut scripts
|
# wut scripts
|
||||||
The following scripts are in the repo:
|
The following scripts are in the repo.
|
||||||
|
|
||||||
* `wut` --- Feed it an observation ID and it returns if it is a "good", "bad", or "failed" observation.
|
* `wut` --- Feed it an observation ID and it returns if it is a "good", "bad", or "failed" observation.
|
||||||
|
* `wut-aria-add` --- Add a torrent from the Internet Archive to the aria daemon for downloading.
|
||||||
|
* `wut-aria-daemon` --- Run an aria daemon for torrent downloads from the Internet Archive.
|
||||||
* `wut-audio-archive` --- Downloads audio files from archive.org.
|
* `wut-audio-archive` --- Downloads audio files from archive.org.
|
||||||
|
* `wut-audio-sha1` --- Verifies sha1 checksums of files downloaded from archive.org.
|
||||||
* `wut-compare` --- Compare an observations' current presumably human vetting with a `wut` vetting.
|
* `wut-compare` --- Compare an observations' current presumably human vetting with a `wut` vetting.
|
||||||
* `wut-compare-all` --- Compare all the observations in `download/` with `wut` vettings.
|
* `wut-compare-all` --- Compare all the observations in `download/` with `wut` vettings.
|
||||||
* `wut-compare-tx` --- Compare all the observations in `download/` with `wut` vettings using selected transmitter UUID.
|
* `wut-compare-tx` --- Compare all the observations in `download/` with `wut` vettings using selected transmitter UUID.
|
||||||
|
@ -61,19 +86,34 @@ The following scripts are in the repo:
|
||||||
* `wut-dl-sort` --- Populate `data/` dir with waterfalls from `download/`.
|
* `wut-dl-sort` --- Populate `data/` dir with waterfalls from `download/`.
|
||||||
* `wut-dl-sort-tx` --- Populate `data/` dir with waterfalls from `download/` using selected transmitter UUID.
|
* `wut-dl-sort-tx` --- Populate `data/` dir with waterfalls from `download/` using selected transmitter UUID.
|
||||||
* `wut-dl-sort-txmode` --- Populate `data/` dir with waterfalls from `download/` using selected encoding.
|
* `wut-dl-sort-txmode` --- Populate `data/` dir with waterfalls from `download/` using selected encoding.
|
||||||
|
* `wut-dl-sort-txmode-all` --- Populate `data/` dir with waterfalls from `download/` using all encodings.
|
||||||
* `wut-files` --- Tells you about what files you have in `downloads/` and `data/`.
|
* `wut-files` --- Tells you about what files you have in `downloads/` and `data/`.
|
||||||
|
* `wut-files-data` --- Tells you about what files you have in `data/`.
|
||||||
|
* `wut-ia` --- Download SatNOGS data from the Internet Archive at `archive.org`.
|
||||||
|
* `wut-ia-torrents` --- Download SatNOGS torrents from the Internet Archive at `archive.org`.
|
||||||
|
* `wut-img-ck.py` --- Validate image files are not corrupt with PIL.
|
||||||
* `wut-ml` --- Main machine learning Python script using Tensorflow and Keras.
|
* `wut-ml` --- Main machine learning Python script using Tensorflow and Keras.
|
||||||
|
* `wut-ml-auto` --- Machine learning Python script using Tensorflow and Keras, auto.
|
||||||
* `wut-ml-load` --- Machine learning Python script using Tensorflow and Keras, load `data/wut.h5`.
|
* `wut-ml-load` --- Machine learning Python script using Tensorflow and Keras, load `data/wut.h5`.
|
||||||
* `wut-ml-save` --- Machine learning Python script using Tensorflow and Keras, save `data/wut.h5`.
|
* `wut-ml-save` --- Machine learning Python script using Tensorflow and Keras, save `data/wut.h5`.
|
||||||
* `wut-obs` --- Download the JSON for an observation ID.
|
* `wut-obs` --- Download the JSON for an observation ID.
|
||||||
* `wut-ogg2wav` --- Convert `.ogg` files in `downloads/` to `.wav` files.
|
* `wut-ogg2wav` --- Convert `.ogg` files in `downloads/` to `.wav` files.
|
||||||
|
* `wut-rm-random` --- Randomly deletes stuff. Very bad.
|
||||||
* `wut-review-staging` --- Review all images in `data/staging`.
|
* `wut-review-staging` --- Review all images in `data/staging`.
|
||||||
|
* `wut-tf` --- Shell script to set variables when launching `wut-tf.py`.
|
||||||
|
* `wut-tf.py` --- Distributed learning script to be run on multiple nodes.
|
||||||
* `wut-water` --- Download waterfall for an observation ID to `download/[ID]`.
|
* `wut-water` --- Download waterfall for an observation ID to `download/[ID]`.
|
||||||
* `wut-water-range` --- Download waterfalls for a range of observation IDs to `download/[ID]`.
|
* `wut-water-range` --- Download waterfalls for a range of observation IDs to `download/[ID]`.
|
||||||
|
* `wut-worker` --- Shell script to set variables when launching `wut-worker.py`.
|
||||||
|
* `wut-worker.py` --- Distributed training script to run on multiple nodes.
|
||||||
|
* `wut-worker-mas` --- Shell script to set variables when launching `wut-worker-mas.py`.
|
||||||
|
* `wut-worker-mas.py` --- Distributed training script to run on multiple nodes, alt version.
|
||||||
|
|
||||||
|
|
||||||
# Installation
|
# Installation
|
||||||
Most of the scripts are simple shell scripts with few dependencies.
|
Installation notes...
|
||||||
|
|
||||||
|
There's more docs on a few different setups in the `docs/` subdir.
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
The scripts use files that are ignored in the git repo.
|
The scripts use files that are ignored in the git repo.
|
||||||
|
@ -92,57 +132,56 @@ mkdir -p data/test/unvetted
|
||||||
```
|
```
|
||||||
|
|
||||||
## Debian Packages
|
## Debian Packages
|
||||||
You'll need `curl` and `jq`, both in Debian's repos.
|
Install dependencies from Debian.
|
||||||
|
|
||||||
```
|
```
|
||||||
apt update
|
sudo apt update
|
||||||
apt install curl jq
|
sudo apt install curl jq python3-pip graphviz
|
||||||
```
|
```
|
||||||
|
|
||||||
## Install Tensorflow
|
## Install Python Packages
|
||||||
For the machine learning scripts, like `wut-ml`, Tensorflow
|
For the machine learning scripts, like `wut-ml`, Tensorflow
|
||||||
needs to be installed.
|
needs to be installed.
|
||||||
As of version 2 of Tensorflow, Keras no longer needs to be
|
|
||||||
installed separately.
|
|
||||||
|
|
||||||
|
|
||||||
The verions of Tensorflow installed with `pip3` on Debian
|
|
||||||
Buster crashes. It is perhaps best to do a custom install,
|
|
||||||
best preferred build options, of the most preferred version.
|
|
||||||
At this point, the `remotes/origin/r2.1` branch is preferred.
|
|
||||||
|
|
||||||
|
|
||||||
To install Tensorflow:
|
|
||||||
|
|
||||||
* https://www.tensorflow.org/install/source
|
|
||||||
|
|
||||||
1. Install dependencies in Debian.
|
|
||||||
|
|
||||||
1. Install Bazel to build Tensorflow.
|
|
||||||
|
|
||||||
1. Build Tensorflow pip package.
|
|
||||||
|
|
||||||
1. Install Tensorflow from custom pip package.
|
|
||||||
|
|
||||||
|
You need to add `~/.local/bin` to your `$PATH`:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Install deps
|
echo 'PATH=~/.local/bin:$PATH' >> ~/.bashrc
|
||||||
apt update
|
```
|
||||||
apt install python3-pip
|
|
||||||
# Install bazel .deb from releases here:
|
Then log out and back in, or reload ala:
|
||||||
firefox https://github.com/bazelbuild/bazel/releases
|
```
|
||||||
# Install Tensorflow
|
. ~/.bashrc
|
||||||
git clone tensorflow...
|
```
|
||||||
cd tensorflow
|
|
||||||
git checkout remotes/origin/r2.1
|
Update pip to latest pretty version, in local directory.
|
||||||
./configure
|
Vary Python package install, suited to taste.
|
||||||
# Run Bazel to build pip package. Takes nearly 2 hours to build.
|
|
||||||
bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
|
```
|
||||||
./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
|
pip install --user --upgrade pip
|
||||||
pip3 install --user /tmp/tensorflow_pkg/tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl
|
```
|
||||||
|
|
||||||
|
Make sure you have right pip:
|
||||||
|
```
|
||||||
|
debian@workstation:~$ which pip
|
||||||
|
/home/debian/.local/bin/pip
|
||||||
|
```
|
||||||
|
|
||||||
|
Install Python packages:
|
||||||
|
```
|
||||||
|
pip install --user --upgrade -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
Make and install `satnogs-wut`:
|
||||||
|
|
||||||
|
```
|
||||||
|
make
|
||||||
|
sudo make install
|
||||||
```
|
```
|
||||||
|
|
||||||
### Tensorflow KVM Notes
|
### Tensorflow KVM Notes
|
||||||
|
Note, for KVM, pass cpu=host if host has "avx" in `/proc/cpuinfo`.
|
||||||
|
|
||||||
Recent versions of Tensorflow can handle many more CPU build options
|
Recent versions of Tensorflow can handle many more CPU build options
|
||||||
to optimize for speed, such as
|
to optimize for speed, such as
|
||||||
[AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
|
[AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions).
|
||||||
|
@ -154,81 +193,13 @@ For more info about this in Proxmox, see
|
||||||
If you don't have this enabled, CPU instructions will fail or
|
If you don't have this enabled, CPU instructions will fail or
|
||||||
Tensorflow will run slower than it could.
|
Tensorflow will run slower than it could.
|
||||||
|
|
||||||
### Tensor Configuration
|
|
||||||
```
|
|
||||||
$ ./configure
|
|
||||||
WARNING: --batch mode is deprecated. Please instead explicitly shut down your Bazel server using the command "bazel shutdown".
|
|
||||||
You have bazel 0.29.1 installed.
|
|
||||||
Please specify the location of python. [Default is /usr/bin/python3]:
|
|
||||||
|
|
||||||
|
## Jupyter
|
||||||
Found possible Python library paths:
|
|
||||||
/usr/lib/python3/dist-packages
|
|
||||||
/usr/local/lib/python3.7/dist-packages
|
|
||||||
Please input the desired Python library path to use. Default is [/usr/lib/python3/dist-packages]
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with XLA JIT support? [Y/n]:
|
|
||||||
XLA JIT support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
|
|
||||||
No OpenCL SYCL support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with ROCm support? [y/N]:
|
|
||||||
No ROCm support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to build TensorFlow with CUDA support? [y/N]:
|
|
||||||
No CUDA support will be enabled for TensorFlow.
|
|
||||||
|
|
||||||
Do you wish to download a fresh release of clang? (Experimental) [y/N]:
|
|
||||||
Clang will not be downloaded.
|
|
||||||
|
|
||||||
Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native -Wno-sign-compare]: -march=native -mssse3 -mcx16 -msse4.1 -msse4.2 -mpopcnt -mavx
|
|
||||||
|
|
||||||
|
|
||||||
Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
|
|
||||||
Not configuring the WORKSPACE for Android builds.
|
|
||||||
|
|
||||||
Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
|
|
||||||
--config=mkl # Build with MKL support.
|
|
||||||
--config=monolithic # Config for mostly static monolithic build.
|
|
||||||
--config=ngraph # Build with Intel nGraph support.
|
|
||||||
--config=numa # Build with NUMA support.
|
|
||||||
--config=dynamic_kernels # (Experimental) Build kernels into separate shared objects.
|
|
||||||
--config=v2 # Build TensorFlow 2.x instead of 1.x.
|
|
||||||
Preconfigured Bazel build configs to DISABLE default on features:
|
|
||||||
--config=noaws # Disable AWS S3 filesystem support.
|
|
||||||
--config=nogcp # Disable GCP support.
|
|
||||||
--config=nohdfs # Disable HDFS support.
|
|
||||||
--config=nonccl # Disable NVIDIA NCCL support.
|
|
||||||
Configuration finished
|
|
||||||
```
|
|
||||||
|
|
||||||
## KVM
|
|
||||||
Note, for KVM, pass cpu=host if host has "avx" in `/proc/cpuinfo`.
|
|
||||||
|
|
||||||
## Install Jupyter
|
|
||||||
Jupyter is a cute little web interface that makes Python programming
|
Jupyter is a cute little web interface that makes Python programming
|
||||||
easy. It works well for machine learning because you can step through
|
easy. It works well for machine learning because you can step through
|
||||||
just parts of the code, changing variables and immediately seeing
|
just parts of the code, changing variables and immediately seeing
|
||||||
output in the web browser.
|
output in the web browser.
|
||||||
|
|
||||||
Probably installed like this:
|
|
||||||
|
|
||||||
```
|
|
||||||
pip3 install --user jupyterlab
|
|
||||||
# Also other good packages, maybe like:
|
|
||||||
pip3 install --user jupyter-tensorboard
|
|
||||||
pip3 list | grep jupyter
|
|
||||||
# returns:
|
|
||||||
jupyter 1.0.0
|
|
||||||
jupyter-client 5.3.4
|
|
||||||
jupyter-console 6.0.0
|
|
||||||
jupyter-core 4.6.1
|
|
||||||
jupyter-tensorboard 0.1.10
|
|
||||||
jupyterlab 1.2.4
|
|
||||||
jupyterlab-server 1.0.6
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
# Usage
|
# Usage
|
||||||
The main purpose of the script is to evaluate an observation,
|
The main purpose of the script is to evaluate an observation,
|
||||||
|
@ -257,12 +228,6 @@ The following steps need to be performed:
|
||||||
|
|
||||||
1. Rate an observation using the `wut` script.
|
1. Rate an observation using the `wut` script.
|
||||||
|
|
||||||
# ml.spacecruft.org
|
|
||||||
This server is processing the data and has directories available
|
|
||||||
to sync.
|
|
||||||
|
|
||||||
* https://ml.spacecruft.org/
|
|
||||||
|
|
||||||
## Data Caching Downloads
|
## Data Caching Downloads
|
||||||
The scripts are designed to not download a waterfall or make a JSON request
|
The scripts are designed to not download a waterfall or make a JSON request
|
||||||
for an observation it has already requested. The first time an observation
|
for an observation it has already requested. The first time an observation
|
||||||
|
@ -280,64 +245,30 @@ Files in the `preprocess/` directory have been preprocessed to be used
|
||||||
further in the pipeline. This contains `.wav` files that have been
|
further in the pipeline. This contains `.wav` files that have been
|
||||||
decoded from `.ogg` files.
|
decoded from `.ogg` files.
|
||||||
|
|
||||||
|
## Internet Archive Downloads
|
||||||
|
The Internet Archive has a mirror of data from the SatNOGS network.
|
||||||
|
It is better to download from there to save on Libre Space Foundation
|
||||||
|
resources.
|
||||||
|
|
||||||
## SatNOGS Observation Data Mirror
|
* https://archive.org/details/satnogs
|
||||||
The downloaded waterfalls are available below via `http` and `rsync`.
|
|
||||||
Use this instead of downloading from SatNOGS to save their bandwidth.
|
To download, perhaps do something like the following.
|
||||||
|
Get an account at archive.org, then run this to set up your account locally:
|
||||||
|
|
||||||
```
|
```
|
||||||
# Something like:
|
ia configure
|
||||||
wget --mirror https://ml.spacecruft.org/download
|
|
||||||
# Or with rsync:
|
|
||||||
mkdir download
|
|
||||||
rsync -ultav rsync://ml.spacecruft.org/download/ download/
|
|
||||||
```
|
```
|
||||||
|
|
||||||
# TODO / Brainstorms
|
To download all the SatNOGS collections `.torrent` files from the
|
||||||
This is a first draft of how to do this. The actual machine learning
|
Internet Archive, run:
|
||||||
process hasn't been looked at at all, except to get it to generate
|
|
||||||
an answer. It has a long ways to go. There are also many ways to do
|
|
||||||
this besides using Tensorflow and Keras. Originally, I considered
|
|
||||||
using OpenCV. Ideas in no particular order below.
|
|
||||||
|
|
||||||
## General
|
```
|
||||||
General considerations.
|
wut-ia-torrents
|
||||||
|
```
|
||||||
* Use Open CV.
|
|
||||||
|
|
||||||
* Use something other than Tensorflow / Keras.
|
|
||||||
|
|
||||||
* Do mirror of `network.satnogs.org` and do API calls to it for data.
|
|
||||||
|
|
||||||
* Issues are now available here:
|
|
||||||
* https://spacecruft.org/spacecruft/satnogs-wut/issues
|
|
||||||
|
|
||||||
## Tensorflow / Keras
|
|
||||||
At present Tensorflow and Keras are used.
|
|
||||||
|
|
||||||
* Learn Keras / Tensorflow...
|
|
||||||
|
|
||||||
* What part of image is being evaluated?
|
|
||||||
|
|
||||||
* Re-evaluate each step.
|
|
||||||
|
|
||||||
* Right now the prediction output is just "good" or "bad", needs
|
|
||||||
"failed" too.
|
|
||||||
|
|
||||||
* Give confidence score in each prediction.
|
|
||||||
|
|
||||||
* Visualize what ML is looking at.
|
|
||||||
|
|
||||||
* Separate out good/bad/failed by satellite, transmitter, or encoding.
|
|
||||||
This way "good" isn't considering a "good" vetting to be a totally
|
|
||||||
different encoding. Right now, it is considering as good observations
|
|
||||||
that should be bad...
|
|
||||||
|
|
||||||
* If it has a low confidence, return "unknown" instead of "good" or "bad".
|
|
||||||
|
|
||||||
|
|
||||||
# Caveats
|
# Caveats
|
||||||
This is nearly the first machine learning script I've done,
|
This is the first artificial intelligence script I've done,
|
||||||
I know little about radio and less about satellites,
|
I know little about radio and less about satellites,
|
||||||
and I'm not a programmer.
|
and I'm not a programmer.
|
||||||
|
|
||||||
|
@ -350,4 +281,4 @@ Main repository is available here:
|
||||||
|
|
||||||
License: CC By SA 4.0 International and/or GPLv3+ at your discretion. Other code licensed under their own respective licenses.
|
License: CC By SA 4.0 International and/or GPLv3+ at your discretion. Other code licensed under their own respective licenses.
|
||||||
|
|
||||||
Copyright (C) 2019, 2020, Jeff Moe
|
Copyright (C) 2019, 2020, 2022 Jeff Moe
|
||||||
|
|
|
@ -33,22 +33,17 @@ they are created.
|
||||||
```
|
```
|
||||||
# On main workstation or node where you built tensorflow:
|
# On main workstation or node where you built tensorflow:
|
||||||
NODES="ml1 ml2 ml3 ml4 ml5"
|
NODES="ml1 ml2 ml3 ml4 ml5"
|
||||||
for i in $NODES
|
|
||||||
do scp -p tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl $i:
|
|
||||||
done
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
# On worker nodes:
|
# On worker nodes:
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install python3-pip sshfs
|
sudo apt install python3-pip sshfs jq
|
||||||
# XXX deps...
|
pip3 install --upgrade --user pip
|
||||||
pip3 install --upgrade setuptools
|
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
||||||
pip3 install --user tensorflow-2.1.0-cp37-cp37m-linux_x86_64.whl
|
pip3 install --upgrade --user -r requirements-node.txt
|
||||||
pip3 install --user simplejson
|
|
||||||
pip3 install --user pillow
|
# If you have cloned the tensorflow repo, test with:
|
||||||
|
#python3 ~/devel/tensorflow/tensorflow/tensorflow/python/distribute/multi_worker_continuous_run_test.py
|
||||||
```
|
```
|
||||||
|
|
||||||
# Usage
|
|
||||||
`top`
|
|
||||||
|
|
|
@ -0,0 +1,202 @@
|
||||||
|
# PyTorch
|
||||||
|
PyTorch is an alternative to TensorFlow.
|
||||||
|
|
||||||
|
If using a KVM, be sure CPU type is set to `host`.
|
||||||
|
|
||||||
|
## Get Source
|
||||||
|
Get PyTorch source code:
|
||||||
|
|
||||||
|
```
|
||||||
|
# This is about 1 gig:
|
||||||
|
git clone --recursive https://github.com/pytorch/pytorch
|
||||||
|
```
|
||||||
|
|
||||||
|
The recursive git repos contain a mix of permissive licenses, mostly
|
||||||
|
BSD, MIT, Apache style. No GPL. License owners are mostly Google
|
||||||
|
and Facebook, with a mix of many others.
|
||||||
|
|
||||||
|
## Build Py
|
||||||
|
Build from scratch with free software options.
|
||||||
|
PyTorch has a `CMakeLists.txt`, so lets see how it goes with a `cmake` build....
|
||||||
|
|
||||||
|
Install build dependencies:
|
||||||
|
```
|
||||||
|
apt install cmake cmake-curses-gui g++ python-yaml python-typing
|
||||||
|
# Note, it uses python-yaml, not python3-yaml...
|
||||||
|
|
||||||
|
# Optional deps:
|
||||||
|
apt install doxygen
|
||||||
|
apt install libfftw3-dev
|
||||||
|
apt install libgmp3-dev
|
||||||
|
apt install libmpfr-dev
|
||||||
|
apt install libmkldnn-dev
|
||||||
|
apt install libnuma-dev # Nope, not right one
|
||||||
|
# Optional deps for BLAS OpenBLAS
|
||||||
|
apt install libopenblas-dev
|
||||||
|
# OpenCV
|
||||||
|
apt install libopencv-dev
|
||||||
|
# pybind?
|
||||||
|
apt install pybind11-dev
|
||||||
|
pybind11_INCLUDE_DIR /usr/include/pybind11
|
||||||
|
|
||||||
|
# BLAS Eigen
|
||||||
|
apt install libeigen3-dev # fail?
|
||||||
|
# ffmpeg
|
||||||
|
apt install libavcodec-dev libavdevice-dev libavfilter-dev libavformat-dev libavresample-dev libavutil-dev libpostproc-dev libswresample-dev libswscale-dev ffmpeg
|
||||||
|
# Thusly:
|
||||||
|
USE_FFMPEG ON
|
||||||
|
FFMPEG_AVCODEC_INCLUDE_DIR /usr/include/x86_64-linux-gnu/libavcodec
|
||||||
|
FFMPEG_LIBAVCODEC /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBAVFORMAT /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBAVUTIL /usr/lib/x86_64-linux-gnu
|
||||||
|
FFMPEG_LIBSWSCALE /usr/lib/x86_64-linux-gnu
|
||||||
|
# or?
|
||||||
|
FFMPEG_AVCODEC_INCLUDE_DIR /usr/include/x86_64-linux-gnu/libavcodec
|
||||||
|
FFMPEG_LIBAVCODEC /usr/lib/x86_64-linux-gnu/libavcodec.so
|
||||||
|
FFMPEG_LIBAVFORMAT /usr/lib/x86_64-linux-gnu/libavformat.so
|
||||||
|
FFMPEG_LIBAVUTIL /usr/lib/x86_64-linux-gnu/libavutil.so
|
||||||
|
FFMPEG_LIBSWSCALE /usr/lib/x86_64-linux-gnu/libswscale.so
|
||||||
|
|
||||||
|
|
||||||
|
# XXX
|
||||||
|
TORCH_BUILD_VERSION default is 1.1.0
|
||||||
|
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake ..
|
||||||
|
ccmake ..
|
||||||
|
# build takes ~30 minutes
|
||||||
|
make -j8
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
#cmake note
|
||||||
|
Generated cmake files are only fully tested if one builds with system glog,
|
||||||
|
gflags, and protobuf. Other settings may generate files that are not well
|
||||||
|
tested.
|
||||||
|
|
||||||
|
# so maybe:
|
||||||
|
apt install libgflags-dev
|
||||||
|
apt install libprotobuf-dev
|
||||||
|
# glog?
|
||||||
|
apt install libgoogle-glog-dev # ???
|
||||||
|
```
|
||||||
|
|
||||||
|
Or:
|
||||||
|
|
||||||
|
```
|
||||||
|
python setup.py install
|
||||||
|
```
|
||||||
|
|
||||||
|
How docs say:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
|
||||||
|
python setup.py build --cmake-only
|
||||||
|
ccmake build # or cmake-gui build
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Notable build options:
|
||||||
|
|
||||||
|
```
|
||||||
|
BLAS --- has multiple options:
|
||||||
|
MKL (non-free bits possible)
|
||||||
|
vecLib (?)
|
||||||
|
FLAME (?)
|
||||||
|
Eigen (in Debian)
|
||||||
|
ATLAS (in Debian?)
|
||||||
|
OpenBLAS (in Debian)
|
||||||
|
```
|
||||||
|
|
||||||
|
More options:
|
||||||
|
|
||||||
|
```
|
||||||
|
BUILD_BINARY ON
|
||||||
|
BUILD_PYTHON ON
|
||||||
|
OPENMP_FOUND ON
|
||||||
|
USE_CUDA OFF
|
||||||
|
USE_CUDNN OFF
|
||||||
|
USE_DISTRIBUTED ON
|
||||||
|
USE_FFMPEG ON
|
||||||
|
USE_MKLDNN ON ?? with Debian's packages ?
|
||||||
|
USE_MKLDNN_CBLAS NO ?
|
||||||
|
USE_LEVELDB ?? Available in debian.
|
||||||
|
USE_LMDB ?? Available in Debian.
|
||||||
|
USE_MPI ON heh
|
||||||
|
USE_NCCL OFF nvidia
|
||||||
|
USE_NUMA ON ?
|
||||||
|
USE_NUMPY ON
|
||||||
|
USE_OBSERVERS ON ?
|
||||||
|
USE_OPENCL ON ?
|
||||||
|
USE_OPENCV ON ?
|
||||||
|
USE_OPENMP ON ?
|
||||||
|
USE_REDIS ?
|
||||||
|
USE_ROCKSDB Available in Debian, like leveldb and lmdb
|
||||||
|
USE_ZMQ Available in Debian, messaging.
|
||||||
|
USE_ZSTD Available in Debian, compression.
|
||||||
|
WITH_BLAS ?
|
||||||
|
WITH_OPENMP ON
|
||||||
|
CAFFE2_USE_MSVC_STATIC_RUNTIME OFF ??
|
||||||
|
BUILD_CAFFE2_MOBILE OFF
|
||||||
|
```
|
||||||
|
|
||||||
|
# More misc...
|
||||||
|
```
|
||||||
|
git checkout v1.4.0
|
||||||
|
apt install python3-dev python3-numpy python-numpy
|
||||||
|
# uh
|
||||||
|
apt install libcaffe-cpu-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
Seems to be using not python3??
|
||||||
|
|
||||||
|
|
||||||
|
# Misc
|
||||||
|
```
|
||||||
|
[E init_intrinsics_check.cc:43] CPU feature avx is present on your machine, but the Caffe2 binary is not compiled with it. It means you may not get the full speed of your CPU.
|
||||||
|
```
|
||||||
|
|
||||||
|
# Build PyTorch `pip`
|
||||||
|
|
||||||
|
Lets get old Python 2 out of here:
|
||||||
|
|
||||||
|
```
|
||||||
|
apt autoremove --purge python2 python2-minimal python2.7 python2.7-minimal libpython2.7 libpython2.7-minimal libpython2.7-stdlib
|
||||||
|
git checkout v1.4.0
|
||||||
|
apt install python3-pip python3-setuptools
|
||||||
|
# Docs recommend this, but mkl is proprietary:
|
||||||
|
pip3 install --user numpy ninja pyyaml mkl mkl-include setuptools cmake cffi
|
||||||
|
# Try:
|
||||||
|
pip3 install --user --upgrade pip
|
||||||
|
# Set that new pip in your path, ~/.local/bin/ in ~/.bashrc:
|
||||||
|
export PATH="~/.local/bin:/usr/lib/ccache:$PATH"
|
||||||
|
|
||||||
|
pip3 install --user cmake
|
||||||
|
pip3 install --user numpy ninja pyyaml setuptools cmake cffi
|
||||||
|
# From upstream docs:
|
||||||
|
pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||||
|
```
|
||||||
|
|
||||||
|
# Build with Python Setup
|
||||||
|
|
||||||
|
```
|
||||||
|
git submodule sync
|
||||||
|
git submodule update --init --recursive
|
||||||
|
python3 setup.py build --cmake-only
|
||||||
|
ccmake build
|
||||||
|
python3 setup.py install --user
|
||||||
|
```
|
||||||
|
|
||||||
|
# Proprietary Bits
|
||||||
|
Parts to avoid in the PyTorch ecosystem:
|
||||||
|
|
||||||
|
* Anaconda is a package manager for Python.The Anaconda repository contains non-free software,
|
||||||
|
so don't use it.
|
||||||
|
|
||||||
|
* "MKL" (Intel's non-free binaries).
|
||||||
|
|
||||||
|
* Note, these MKL packages are in Debian: `libmkldnn0 libmkldnn-dev libmkldnn-doc`
|
||||||
|
|
||||||
|
* Intel compiler.
|
||||||
|
|
|
@ -0,0 +1,134 @@
|
||||||
|
# TensorFlow Serving
|
||||||
|
HOWTO Set up and run TensorFlow Serving.
|
||||||
|
This is to this particular configuration.
|
||||||
|
|
||||||
|
# Software
|
||||||
|
Main software in use:
|
||||||
|
|
||||||
|
* Debian
|
||||||
|
* Proxmox
|
||||||
|
* Ceph
|
||||||
|
* Python 3
|
||||||
|
* TensorFlow Serving
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
Install TensorFlow Serving. The recommended way is using `docker`, but
|
||||||
|
here we build from source.
|
||||||
|
|
||||||
|
* https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/setup.md
|
||||||
|
|
||||||
|
* https://github.com/tensorflow/serving/blob/master/tensorflow_serving/tools/docker/Dockerfile.devel
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Install Bazel
|
||||||
|
Instead of building Bazel, download the version that TensorFlow Serving builds with,
|
||||||
|
currently version `1.2.1` as shown in the docker builder.
|
||||||
|
|
||||||
|
Install Bazel dependencies:
|
||||||
|
|
||||||
|
```
|
||||||
|
# Note, this installs python 2.7....
|
||||||
|
apt install g++ zlib1g-dev unzip python
|
||||||
|
```
|
||||||
|
|
||||||
|
Install bazel .deb from releases here.
|
||||||
|
* firefox https://github.com/bazelbuild/bazel/releases
|
||||||
|
|
||||||
|
Note get the version Serving wants, not the latest release.
|
||||||
|
|
||||||
|
```
|
||||||
|
wget https://github.com/bazelbuild/bazel/releases/download/1.2.1/bazel_1.2.1-linux-x86_64.deb
|
||||||
|
wget https://github.com/bazelbuild/bazel/releases/download/1.2.1/bazel_1.2.1-linux-x86_64.deb.sha256
|
||||||
|
dpkg -i bazel_1.2.1-linux-x86_64.deb
|
||||||
|
apt -f install
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install Dependencies
|
||||||
|
Dependencies. Note, there are likely fewer dependencies than listed
|
||||||
|
in the docs, since `bazel` is installed from `.deb`, not built.
|
||||||
|
|
||||||
|
```
|
||||||
|
apt update
|
||||||
|
apt install --no-install-recommends \
|
||||||
|
automake \
|
||||||
|
build-essential \
|
||||||
|
ca-certificates \
|
||||||
|
curl \
|
||||||
|
git \
|
||||||
|
libcurl3-dev \
|
||||||
|
libfreetype6-dev \
|
||||||
|
libpng-dev \
|
||||||
|
libtool \
|
||||||
|
libzmq3-dev \
|
||||||
|
mlocate \
|
||||||
|
pkg-config \
|
||||||
|
python-dev \
|
||||||
|
software-properties-common \
|
||||||
|
swig \
|
||||||
|
unzip \
|
||||||
|
wget \
|
||||||
|
zip \
|
||||||
|
zlib1g-dev \
|
||||||
|
python3-distutils
|
||||||
|
```
|
||||||
|
|
||||||
|
Not installed:
|
||||||
|
```
|
||||||
|
openjdk-8-jdk \
|
||||||
|
openjdk-8-jre-headless \
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Compile TensorFlow Serving
|
||||||
|
HOWTO compile TensorFlow Serving:
|
||||||
|
|
||||||
|
```
|
||||||
|
git clone https://github.com/tensorflow/serving
|
||||||
|
cd serving
|
||||||
|
|
||||||
|
git checkout 2.1.0
|
||||||
|
|
||||||
|
bazel build --color=yes --curses=yes \
|
||||||
|
${TF_SERVING_BAZEL_OPTIONS} \
|
||||||
|
--verbose_failures \
|
||||||
|
--output_filter=DONT_MATCH_ANYTHING \
|
||||||
|
${TF_SERVING_BUILD_OPTIONS} \
|
||||||
|
tensorflow_serving/model_servers:tensorflow_model_server
|
||||||
|
```
|
||||||
|
|
||||||
|
Build `pip` package:
|
||||||
|
|
||||||
|
```
|
||||||
|
bazel build --color=yes --curses=yes \
|
||||||
|
${TF_SERVING_BAZEL_OPTIONS} \
|
||||||
|
--verbose_failures \
|
||||||
|
--output_filter=DONT_MATCH_ANYTHING \
|
||||||
|
${TF_SERVING_BUILD_OPTIONS} \
|
||||||
|
tensorflow_serving/tools/pip_package:build_pip_package
|
||||||
|
|
||||||
|
|
||||||
|
bazel-bin/tensorflow_serving/tools/pip_package/build_pip_package \
|
||||||
|
/tmp/pip
|
||||||
|
```
|
||||||
|
|
||||||
|
# Install TensorFlow Server with `pip`
|
||||||
|
Install with `pip`, don't use bazel, or build, etc.
|
||||||
|
|
||||||
|
```
|
||||||
|
# Set PATH, add to ~/.bashrc
|
||||||
|
export PATH="~/.local/bin:/usr/lib/ccache:$PATH"
|
||||||
|
. ~/.bashrc
|
||||||
|
sudo apt install python3-pip
|
||||||
|
pip3 install --upgrade --user pip
|
||||||
|
# Should return the one in ~/.local:
|
||||||
|
which pip3
|
||||||
|
# Install it, needs ~3 gigs free in /tmp
|
||||||
|
pip3 install --user tensorflow-serving-api
|
||||||
|
```
|
||||||
|
|
||||||
|
# Misc
|
||||||
|
See also:
|
||||||
|
|
||||||
|
* https://github.com/tobegit3hub/simple_tensorflow_serving
|
||||||
|
|
|
@ -0,0 +1,57 @@
|
||||||
|
# Voila
|
||||||
|
|
||||||
|
Voila is a way to turn Jupyter notebooks into web applications.
|
||||||
|
|
||||||
|
# Install
|
||||||
|
Start with basic Debian Buster install.
|
||||||
|
|
||||||
|
```
|
||||||
|
# set up partitions
|
||||||
|
# XXX deps...
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install apache2 python3-certbot-apache python3-pip sshfs npm nodejs
|
||||||
|
certbot
|
||||||
|
systemctl restart apache2
|
||||||
|
adduser wut
|
||||||
|
sudo su - wut
|
||||||
|
pip3 install --user --upgrade pip
|
||||||
|
# make sure new `pip3` at `~/.local/bin/pip3` is in front in `$PATH`.
|
||||||
|
echo 'PATH=~/.local/bin:$PATH' >> ~/.bashrc
|
||||||
|
```
|
||||||
|
|
||||||
|
logout #log back in as user wut
|
||||||
|
sudo su - wut
|
||||||
|
# Install Python packages for Voila
|
||||||
|
pip3 install --user --upgrade -r requirements-voila.txt
|
||||||
|
# Enable Jupyter extensions
|
||||||
|
jupyter nbextension enable --py widgetsnbextension
|
||||||
|
#jupyter labextension install @jupyter-widgets/jupyterlab-manager
|
||||||
|
#jupyter serverextension enable --py jupyterlab --user
|
||||||
|
```
|
||||||
|
|
||||||
|
* Set up hosts file, network, etc.
|
||||||
|
|
||||||
|
* Set up apache proxy
|
||||||
|
|
||||||
|
```
|
||||||
|
# Cruft to start voila:
|
||||||
|
cd /srv/satnogs/satnogs-wut/notebooks/
|
||||||
|
|
||||||
|
voila \
|
||||||
|
--ExecutePreprocessor.timeout=600 \
|
||||||
|
--no-browser \
|
||||||
|
--port=8867 \
|
||||||
|
--autoreload=True \
|
||||||
|
--Voila.ip=localhost \
|
||||||
|
--VoilaConfiguration.enable_nbextensions=False \
|
||||||
|
--theme=dark \
|
||||||
|
wut-web.ipynb \
|
||||||
|
1>>~/log/voila.log 2>>~/log/voila.err &
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
# wut?
|
||||||
|
Site:
|
||||||
|
|
||||||
|
* https://spacecruft.org/spacecruft/satnogs-wut/
|
||||||
|
|
Binary file not shown.
|
@ -1,654 +0,0 @@
|
||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 1,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# wut-train-cluster --- What U Think? SatNOGS Observation AI, training application cluster edition.\n",
|
|
||||||
"#\n",
|
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
|
||||||
"#\n",
|
|
||||||
"# Based on data/train and data/val directories builds a wut.tf file.\n",
|
|
||||||
"# GPLv3+\n",
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 2,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from __future__ import absolute_import, division, print_function, unicode_literals\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"import os\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import simplejson as json\n",
|
|
||||||
"import datetime"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 3,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import tensorflow.python.keras\n",
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 4,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%matplotlib inline\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"import seaborn as sns\n",
|
|
||||||
"from sklearn.decomposition import PCA\n",
|
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
|
||||||
"import ipywidgets as widgets\n",
|
|
||||||
"from IPython.display import display, Image"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 5,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"tf 2.1.0\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"print('tf {}'.format(tf.__version__))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 6,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"os.environ[\"TF_CONFIG\"] = json.dumps({\n",
|
|
||||||
" \"cluster\": {\n",
|
|
||||||
" \"worker\": [ \"10.100.100.130:2222\", \"ml1:2222\", \"ml2:2222\", \"ml3:2222\", \"ml4:2222\", \"ml5:2222\" ]\n",
|
|
||||||
" },\n",
|
|
||||||
" \"task\": {\"type\": \"worker\", \"index\": 0 },\n",
|
|
||||||
" \"num_workers\": 5\n",
|
|
||||||
"})"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 7,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"batch_size = 32\n",
|
|
||||||
"epochs = 4\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606\n",
|
|
||||||
"# Good results\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 6"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 8,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"tf.keras.backend.clear_session()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 9,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#from tensorflow.python.framework.ops import disable_eager_execution\n",
|
|
||||||
"#disable_eager_execution()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 10,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"INFO:tensorflow:Enabled multi-worker collective ops with available devices: ['/job:worker/replica:0/task:0/device:CPU:0', '/job:worker/replica:0/task:0/device:XLA_CPU:0']\n",
|
|
||||||
"INFO:tensorflow:Using MirroredStrategy with devices ('/job:worker/task:0',)\n",
|
|
||||||
"INFO:tensorflow:MultiWorkerMirroredStrategy with cluster_spec = {'worker': ['10.100.100.130:2222', 'ml1:2222', 'ml2:2222', 'ml3:2222', 'ml4:2222', 'ml5:2222']}, task_type = 'worker', task_id = 0, num_workers = 6, local_devices = ('/job:worker/task:0',), communication = CollectiveCommunication.RING\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n",
|
|
||||||
" tf.distribute.experimental.CollectiveCommunication.RING)\n",
|
|
||||||
"#\n",
|
|
||||||
"# MultiWorkerMirroredStrategy needs TF_CONFIG\n",
|
|
||||||
"#multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n",
|
|
||||||
"# Central Storage Strategy\n",
|
|
||||||
"#central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()\n",
|
|
||||||
"# ParameterServerStrategy needs TF_CONFIG\n",
|
|
||||||
"#ps_strategy = tf.distribute.experimental.ParameterServerStrategy()\n",
|
|
||||||
"# OneDeviceStrategy No cluster\n",
|
|
||||||
"#strategy = tf.distribute.OneDeviceStrategy(device=\"/CPU:0\")\n",
|
|
||||||
"# Mirrored Strategy\n",
|
|
||||||
"#mirrored_strategy = tf.distribute.MirroredStrategy()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 11,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
|
||||||
"total_val = num_val_good + num_val_bad"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 12,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"total training good images: 3291\n",
|
|
||||||
"total training bad images: 609\n",
|
|
||||||
"--\n",
|
|
||||||
"Total training images: 3900\n",
|
|
||||||
"total validation good images: 3361\n",
|
|
||||||
"total validation bad images: 601\n",
|
|
||||||
"--\n",
|
|
||||||
"Total validation images: 3962\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"print('total training good images:', num_train_good)\n",
|
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total training images:\", total_train)\n",
|
|
||||||
"print('total validation good images:', num_val_good)\n",
|
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 13,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"--\n",
|
|
||||||
"Reduce training and validation set when testing\n",
|
|
||||||
"Reduced training images: 3900\n",
|
|
||||||
"Reduced validation images: 3962\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
|
||||||
"#total_train = 16\n",
|
|
||||||
"#total_val = 16\n",
|
|
||||||
"print(\"Reduced training images:\", total_train)\n",
|
|
||||||
"print(\"Reduced validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 14,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"Found 3900 images belonging to 2 classes.\n",
|
|
||||||
"Found 3962 images belonging to 2 classes.\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=train_dir,\n",
|
|
||||||
" shuffle=True,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')\n",
|
|
||||||
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=val_dir,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 15,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"sample_train_images, _ = next(train_data_gen)\n",
|
|
||||||
"sample_val_images, _ = next(val_data_gen)\n",
|
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"def plotImages(images_arr):\n",
|
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
|
||||||
" axes = axes.flatten()\n",
|
|
||||||
" for img, ax in zip( images_arr, axes):\n",
|
|
||||||
" ax.imshow(img)\n",
|
|
||||||
" ax.axis('off')\n",
|
|
||||||
" plt.tight_layout()\n",
|
|
||||||
" plt.show()\n",
|
|
||||||
" \n",
|
|
||||||
"#plotImages(sample_train_images[0:3])\n",
|
|
||||||
"#plotImages(sample_val_images[0:3])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 16,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%load_ext tensorboard\n",
|
|
||||||
"!rm -rf ./clusterlogs/\n",
|
|
||||||
"#log_dir=\"clusterlogs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
|
||||||
"log_dir=\"clusterlogs\"\n",
|
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
|
||||||
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
|
|
||||||
"#%tensorboard --logdir clusterlogs --port 6006"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 17,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#strategy.num_replicas_in_sync\n",
|
|
||||||
"## Compute global batch size using number of replicas.\n",
|
|
||||||
"#BATCH_SIZE_PER_REPLICA = 5\n",
|
|
||||||
"#print(BATCH_SIZE_PER_REPLICA)\n",
|
|
||||||
"#global_batch_size = (BATCH_SIZE_PER_REPLICA *\n",
|
|
||||||
"# strategy.num_replicas_in_sync)\n",
|
|
||||||
"#print(global_batch_size)\n",
|
|
||||||
"#dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)\n",
|
|
||||||
"#dataset = dataset.batch(global_batch_size)\n",
|
|
||||||
"#LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 18,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 19,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_uncompiled_model():\n",
|
|
||||||
" model = Sequential([\n",
|
|
||||||
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(32, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(64, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Flatten(),\n",
|
|
||||||
" Dense(512, activation='relu'),\n",
|
|
||||||
" Dense(1, activation='sigmoid')\n",
|
|
||||||
" ])\n",
|
|
||||||
" return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 20,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#get_uncompiled_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 21,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"def get_compiled_model():\n",
|
|
||||||
" model = get_uncompiled_model()\n",
|
|
||||||
" model.compile(optimizer='adam',\n",
|
|
||||||
" loss='binary_crossentropy',\n",
|
|
||||||
" metrics=['accuracy'])\n",
|
|
||||||
" return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 22,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#model = get_compiled_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 23,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='tmp/keras-ckpt')]\n",
|
|
||||||
"#callbacks=[tensorboard_callback,callbacks]"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 24,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#def get_fit_model():\n",
|
|
||||||
"# model = get_compiled_model()\n",
|
|
||||||
"# model.fit(\n",
|
|
||||||
"# train_data_gen,\n",
|
|
||||||
"# steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
"# epochs=epochs,\n",
|
|
||||||
"# validation_data=val_data_gen,\n",
|
|
||||||
"# validation_steps=total_val // batch_size,\n",
|
|
||||||
"# verbose=2\n",
|
|
||||||
"# )\n",
|
|
||||||
"#return model"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 25,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with strategy.scope():\n",
|
|
||||||
"# get_uncompiled_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 26,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with strategy.scope():\n",
|
|
||||||
"# get_compiled_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 27,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with strategy.scope():\n",
|
|
||||||
"# get_fit_model()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 28,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#multi_worker_model = get_compiled_model()\n",
|
|
||||||
"#multi_worker_model.fit(\n",
|
|
||||||
"# x=train_data_gen,\n",
|
|
||||||
"# epochs=epochs,\n",
|
|
||||||
"# steps_per_epoch=total_train // batch_size\n",
|
|
||||||
"# )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"INFO:tensorflow:Collective batch_all_reduce: 1 all-reduces, num_workers = 6, communication_hint = RING\n",
|
|
||||||
"INFO:tensorflow:Collective batch_all_reduce: 1 all-reduces, num_workers = 6, communication_hint = RING\n",
|
|
||||||
"INFO:tensorflow:Running Distribute Coordinator with mode = 'independent_worker', cluster_spec = {'worker': ['10.100.100.130:2222', 'ml1:2222', 'ml2:2222', 'ml3:2222', 'ml4:2222', 'ml5:2222']}, task_type = 'worker', task_id = 0, environment = None, rpc_layer = 'grpc'\n",
|
|
||||||
"WARNING:tensorflow:`eval_fn` is not passed in. The `worker_fn` will be used if an \"evaluator\" task exists in the cluster.\n",
|
|
||||||
"WARNING:tensorflow:`eval_strategy` is not passed in. No distribution strategy will be used for evaluation.\n",
|
|
||||||
"INFO:tensorflow:Using MirroredStrategy with devices ('/job:worker/task:0',)\n",
|
|
||||||
"INFO:tensorflow:MultiWorkerMirroredStrategy with cluster_spec = {'worker': ['10.100.100.130:2222', 'ml1:2222', 'ml2:2222', 'ml3:2222', 'ml4:2222', 'ml5:2222']}, task_type = 'worker', task_id = 0, num_workers = 6, local_devices = ('/job:worker/task:0',), communication = CollectiveCommunication.RING\n",
|
|
||||||
"INFO:tensorflow:Using MirroredStrategy with devices ('/job:worker/task:0',)\n",
|
|
||||||
"INFO:tensorflow:MultiWorkerMirroredStrategy with cluster_spec = {'worker': ['10.100.100.130:2222', 'ml1:2222', 'ml2:2222', 'ml3:2222', 'ml4:2222', 'ml5:2222']}, task_type = 'worker', task_id = 0, num_workers = 6, local_devices = ('/job:worker/task:0',), communication = CollectiveCommunication.RING\n",
|
|
||||||
"WARNING:tensorflow:ModelCheckpoint callback is not provided. Workers will need to restart training if any fails.\n",
|
|
||||||
"WARNING:tensorflow:sample_weight modes were coerced from\n",
|
|
||||||
" ...\n",
|
|
||||||
" to \n",
|
|
||||||
" ['...']\n",
|
|
||||||
"WARNING:tensorflow:sample_weight modes were coerced from\n",
|
|
||||||
" ...\n",
|
|
||||||
" to \n",
|
|
||||||
" ['...']\n",
|
|
||||||
"Train for 121 steps, validate for 123 steps\n",
|
|
||||||
"Epoch 1/4\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"with strategy.scope():\n",
|
|
||||||
" model = get_compiled_model()\n",
|
|
||||||
" model.fit(\n",
|
|
||||||
" train_data_gen,\n",
|
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
" epochs=epochs,\n",
|
|
||||||
" validation_data=val_data_gen,\n",
|
|
||||||
" validation_steps=total_val // batch_size,\n",
|
|
||||||
" verbose=2\n",
|
|
||||||
" )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with strategy.scope():\n",
|
|
||||||
"# multi_worker_model = get_compiled_model()\n",
|
|
||||||
"# multi_worker_model.fit(\n",
|
|
||||||
"# x=train_data_gen,\n",
|
|
||||||
"# epochs=epochs,\n",
|
|
||||||
"# steps_per_epoch=total_train // batch_size\n",
|
|
||||||
"# )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#model.summary()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"TRAINING info\")\n",
|
|
||||||
"print(train_dir)\n",
|
|
||||||
"print(train_good_dir)\n",
|
|
||||||
"print(train_bad_dir)\n",
|
|
||||||
"print(train_image_generator)\n",
|
|
||||||
"print(train_data_gen)\n",
|
|
||||||
"#print(sample_train_images)\n",
|
|
||||||
"#print(history)\n",
|
|
||||||
"#model.to_json()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#history = model.fit(X, y, batch_size=32, epochs=40, validation_split=0.1)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/FOO/wut-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/FOO/wut-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/FOO/wut-weights-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/FOO/wut-weights-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"acc = history.history['accuracy']\n",
|
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
|
||||||
"loss = history.history['loss']\n",
|
|
||||||
"val_loss = history.history['val_loss']\n",
|
|
||||||
"epochs_range = range(epochs)\n",
|
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
|
||||||
"plt.subplot(1, 2, 1)\n",
|
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
|
||||||
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
|
|
||||||
"plt.legend(loc='lower right')\n",
|
|
||||||
"plt.title('Training and Validation Accuracy')\n",
|
|
||||||
"plt.subplot(1, 2, 2)\n",
|
|
||||||
"plt.plot(epochs_range, loss, label='Training Loss')\n",
|
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
|
||||||
"plt.legend(loc='upper right')\n",
|
|
||||||
"plt.title('Training and Validation Loss')\n",
|
|
||||||
"plt.show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
|
@ -1,410 +0,0 @@
|
||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# wut-train-cluster --- What U Think? SatNOGS Observation AI, training application cluster edition.\n",
|
|
||||||
"#\n",
|
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
|
||||||
"#\n",
|
|
||||||
"# Based on data/train and data/val directories builds a wut.tf file.\n",
|
|
||||||
"# GPLv3+\n",
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from __future__ import absolute_import, division, print_function, unicode_literals\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"import os\n",
|
|
||||||
"import numpy as np\n",
|
|
||||||
"import simplejson as json\n",
|
|
||||||
"import datetime"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import tensorflow as tf\n",
|
|
||||||
"import tensorflow.python.keras\n",
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate\n",
|
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%matplotlib inline\n",
|
|
||||||
"import matplotlib.pyplot as plt\n",
|
|
||||||
"import seaborn as sns\n",
|
|
||||||
"from sklearn.decomposition import PCA\n",
|
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
|
||||||
"import ipywidgets as widgets\n",
|
|
||||||
"from IPython.display import display, Image"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"os.environ[\"TF_CONFIG\"] = json.dumps({\n",
|
|
||||||
" \"cluster\": {\n",
|
|
||||||
" \"worker\": [ \"ml1:2222\", \"ml2:2222\", \"ml3:2222\", \"ml4:2222\", \"ml5:2222\" ]\n",
|
|
||||||
" },\n",
|
|
||||||
" \"task\": {\"type\": \"worker\", \"index\": 1 },\n",
|
|
||||||
" \"num_workers\": 5\n",
|
|
||||||
"})"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"batch_size = 128\n",
|
|
||||||
"epochs = 32\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606\n",
|
|
||||||
"# Good results\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 6"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#from tensorflow.python.framework.ops import disable_eager_execution\n",
|
|
||||||
"#disable_eager_execution()\n",
|
|
||||||
"# MultiWorkerMirroredStrategy needs TF_CONFIG\n",
|
|
||||||
"#multiworker_strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n",
|
|
||||||
"# Central Storage Strategy\n",
|
|
||||||
"#central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()\n",
|
|
||||||
"# ParameterServerStrategy needs TF_CONFIG\n",
|
|
||||||
"#ps_strategy = tf.distribute.experimental.ParameterServerStrategy()\n",
|
|
||||||
"# OneDeviceStrategy No cluster\n",
|
|
||||||
"#strategy = tf.distribute.OneDeviceStrategy(device=\"/CPU:0\")\n",
|
|
||||||
"# Mirrored Strategy\n",
|
|
||||||
"mirrored_strategy = tf.distribute.MirroredStrategy()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
|
||||||
"total_val = num_val_good + num_val_bad"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print('total training good images:', num_train_good)\n",
|
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total training images:\", total_train)\n",
|
|
||||||
"print('total validation good images:', num_val_good)\n",
|
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
|
||||||
"total_train = 16\n",
|
|
||||||
"total_val = 16\n",
|
|
||||||
"print(\"Reduced training images:\", total_train)\n",
|
|
||||||
"print(\"Reduced validation images:\", total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")\n",
|
|
||||||
"train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=train_dir,\n",
|
|
||||||
" shuffle=True,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')\n",
|
|
||||||
"val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,\n",
|
|
||||||
" directory=val_dir,\n",
|
|
||||||
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
|
||||||
" class_mode='binary')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"sample_train_images, _ = next(train_data_gen)\n",
|
|
||||||
"sample_val_images, _ = next(val_data_gen)\n",
|
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"def plotImages(images_arr):\n",
|
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
|
||||||
" axes = axes.flatten()\n",
|
|
||||||
" for img, ax in zip( images_arr, axes):\n",
|
|
||||||
" ax.imshow(img)\n",
|
|
||||||
" ax.axis('off')\n",
|
|
||||||
" plt.tight_layout()\n",
|
|
||||||
" plt.show()\n",
|
|
||||||
" \n",
|
|
||||||
"plotImages(sample_train_images[0:3])\n",
|
|
||||||
"plotImages(sample_val_images[0:3])"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"%load_ext tensorboard\n",
|
|
||||||
"!rm -rf ./clusterlogs/\n",
|
|
||||||
"#log_dir=\"clusterlogs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
|
||||||
"log_dir=\"clusterlogs\"\n",
|
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
|
||||||
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
|
|
||||||
"%tensorboard --logdir clusterlogs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#with multiworker_strategy.scope():\n",
|
|
||||||
"with mirrored_strategy.scope():\n",
|
|
||||||
" model = Sequential([\n",
|
|
||||||
" Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(32, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Conv2D(64, 3, padding='same', activation='relu'),\n",
|
|
||||||
" MaxPooling2D(),\n",
|
|
||||||
" Flatten(),\n",
|
|
||||||
" Dense(512, activation='relu'),\n",
|
|
||||||
" Dense(1, activation='sigmoid')\n",
|
|
||||||
" ])\n",
|
|
||||||
" model.compile(optimizer='adam',\n",
|
|
||||||
" loss='binary_crossentropy',\n",
|
|
||||||
" metrics=['accuracy'])\n",
|
|
||||||
" history = model.fit_generator(\n",
|
|
||||||
" train_data_gen,\n",
|
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
|
||||||
" epochs=epochs,\n",
|
|
||||||
" validation_data=val_data_gen,\n",
|
|
||||||
" validation_steps=total_val // batch_size,\n",
|
|
||||||
" verbose=2,\n",
|
|
||||||
" callbacks=[tensorboard_callback]\n",
|
|
||||||
" )"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#strategy.num_replicas_in_sync"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.summary()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"Image.LOAD_TRUNCATED_IMAGES = True"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"acc = history.history['accuracy']\n",
|
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
|
||||||
"loss = history.history['loss']\n",
|
|
||||||
"val_loss = history.history['val_loss']\n",
|
|
||||||
"epochs_range = range(epochs)\n",
|
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
|
||||||
"plt.subplot(1, 2, 1)\n",
|
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
|
||||||
"plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n",
|
|
||||||
"plt.legend(loc='lower right')\n",
|
|
||||||
"plt.title('Training and Validation Accuracy')\n",
|
|
||||||
"plt.subplot(1, 2, 2)\n",
|
|
||||||
"plt.plot(epochs_range, loss, label='Training Loss')\n",
|
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
|
||||||
"plt.legend(loc='upper right')\n",
|
|
||||||
"plt.title('Training and Validation Loss')\n",
|
|
||||||
"plt.show()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"TRAINING info\")\n",
|
|
||||||
"print(train_dir)\n",
|
|
||||||
"print(train_good_dir)\n",
|
|
||||||
"print(train_bad_dir)\n",
|
|
||||||
"print(train_image_generator)\n",
|
|
||||||
"print(train_data_gen)\n",
|
|
||||||
"#print(sample_train_images)\n",
|
|
||||||
"print(history)\n",
|
|
||||||
"model.to_json()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Save .tf model data here"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/DUV/wut-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save('data/models/DUV/wut-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/DUV/wut-weights-train-cluster.tf')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"model.save_weights('data/models/DUV/wut-weights-train-cluster.h5')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut-alpha
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8871 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web-alpha.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut-beta
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8873 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web-beta.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -0,0 +1,22 @@
|
||||||
|
[Unit]
|
||||||
|
Description=voila-wut
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Environment="PATH=/home/wut/.local/bin:/usr/local/bin:/usr/bin:/bin"
|
||||||
|
Type=simple
|
||||||
|
PIDFile=/run/voila.pid
|
||||||
|
ExecStart=/home/wut/.local/bin/voila --no-browser --port=8867 --ExecutePreprocessor.timeout=600 --autoreload=True --Voila.ip=localhost --VoilaConfiguration.enable_nbextensions=False --theme=dark devel/spacecruft/satnogs-wut/notebooks/wut-web.ipynb
|
||||||
|
WorkingDirectory=/home/wut
|
||||||
|
User=wut
|
||||||
|
Group=wut
|
||||||
|
Restart=always
|
||||||
|
RestartSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
|
@ -10,24 +10,10 @@
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
||||||
"# Reads wut.h5 and tests files in data/test/unvetted/"
|
"# Reads wut.h5 and tests files in data/test/unvetted/\n",
|
||||||
]
|
"#\n",
|
||||||
},
|
"# GPLv3+\n",
|
||||||
{
|
"#\n",
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# GPLv3+"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
"# Built using Jupyter, Tensorflow, Keras"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -37,24 +23,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Start\")"
|
"import os\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import numpy as np"
|
"import numpy as np"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -64,7 +33,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -73,24 +42,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"import tensorflow as tf\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow import keras\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras import layers\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras import optimizers\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
]
|
"from tensorflow.keras.models import load_model\n",
|
||||||
},
|
"from tensorflow.keras.models import Model\n",
|
||||||
{
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
"cell_type": "code",
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
"execution_count": null,
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
"metadata": {},
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
"outputs": [],
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
"source": [
|
"from tensorflow.keras.utils import plot_model"
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -103,39 +71,15 @@
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"from sklearn.decomposition import PCA"
|
"from sklearn.decomposition import PCA\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Seaborn pip dependency\n",
|
"# Seaborn pip dependency\n",
|
||||||
"import seaborn as sns"
|
"import seaborn as sns\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Interact\n",
|
|
||||||
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
|
|
||||||
"from __future__ import print_function\n",
|
"from __future__ import print_function\n",
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
"import ipywidgets as widgets"
|
"import ipywidgets as widgets\n",
|
||||||
]
|
"\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Display Images\n",
|
|
||||||
"from IPython.display import display, Image"
|
"from IPython.display import display, Image"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -145,7 +89,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Python import done\")"
|
"#ENCODING='APT'\n",
|
||||||
|
"#ENCODING='CW'\n",
|
||||||
|
"#ENCODING='FM'\n",
|
||||||
|
"#ENCODING='FSK9k6'\n",
|
||||||
|
"ENCODING='GMSK2k4'\n",
|
||||||
|
"#ENCODING='GMSK4k8'\n",
|
||||||
|
"#ENCODING='USB'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -154,7 +104,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Load HDF file\")"
|
"h5_file=(\"wut-\" + ENCODING + \".h5\")\n",
|
||||||
|
"model_path_h5 = os.path.join('/srv/satnogs/data/models/', ENCODING, h5_file)\n",
|
||||||
|
"print(model_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -163,7 +115,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model = load_model('data/models/wut-DUV.tf')"
|
"model = load_model(model_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -172,16 +124,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"test_dir = os.path.join('/srv/satnogs/data/', 'test')\n",
|
||||||
]
|
"num_test = len(os.listdir(test_dir))\n",
|
||||||
},
|
"print(\"Will test\", num_test, \"waterfall PNG files under this driectory:\\n\", test_dir)"
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_test = len(os.listdir(test_dir))"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -191,11 +136,11 @@
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Good results\n",
|
"# Good results\n",
|
||||||
"#batch_size = 128\n",
|
"batch_size = 128\n",
|
||||||
"#epochs = 6\n",
|
"epochs = 6\n",
|
||||||
"# Testing, faster more inaccurate results\n",
|
"# Testing, faster more inaccurate results\n",
|
||||||
"batch_size = 32\n",
|
"#batch_size = 32\n",
|
||||||
"epochs = 3"
|
"#epochs = 3"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -223,15 +168,6 @@
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(test_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
|
@ -247,7 +183,9 @@
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"sample_test_images, _ = next(test_data_gen)"
|
"sample_test_images, _ = next(test_data_gen)"
|
||||||
|
@ -259,7 +197,15 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
"print(\"Number of observations to test:\", num_test)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"def plotImages(images_arr):\n",
|
"def plotImages(images_arr):\n",
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
||||||
" axes = axes.flatten()\n",
|
" axes = axes.flatten()\n",
|
||||||
|
@ -276,28 +222,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"plotImages(sample_test_images[0:1])"
|
"plotImages(sample_test_images[0:2])"
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# https://keras.io/models/sequential/\n",
|
|
||||||
"print(\"predict\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#pred=model.predict_generator(test_data_gen,\n",
|
|
||||||
"#steps=1,\n",
|
|
||||||
"#verbose=1)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -309,8 +234,7 @@
|
||||||
"prediction = model.predict(\n",
|
"prediction = model.predict(\n",
|
||||||
" x=test_data_gen,\n",
|
" x=test_data_gen,\n",
|
||||||
" verbose=1\n",
|
" verbose=1\n",
|
||||||
")\n",
|
")"
|
||||||
"print(\"end predict\")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -328,7 +252,6 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Show prediction score\n",
|
|
||||||
"print(prediction)"
|
"print(prediction)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -372,14 +295,12 @@
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": []
|
||||||
"# The End"
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -393,7 +314,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
|
@ -3,22 +3,17 @@
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
"metadata": {},
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# wut-train --- What U Think? SatNOGS Observation AI, training application.\n",
|
"# wut-train --- What U Think? SatNOGS Observation AI, training application.\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
"#\n",
|
"#\n",
|
||||||
"# Based on data/train and data/val directories builds a wut.h5 file."
|
"# Based on data/train and data/val directories builds a wut.h5 file.\n",
|
||||||
]
|
"#\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# GPLv3+"
|
"# GPLv3+"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -28,34 +23,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Built using Jupyter, Tensorflow, Keras"
|
"from __future__ import print_function\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(\"Start\")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import os\n",
|
"import os\n",
|
||||||
"import datetime"
|
"import datetime\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"import numpy as np"
|
"import numpy as np"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -65,7 +35,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -74,15 +44,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"import tensorflow as tf\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow import keras\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras import layers\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras import optimizers\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.models import Model\n",
|
||||||
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
|
"from tensorflow.keras.utils import plot_model"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -91,53 +69,15 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Visualization\n",
|
|
||||||
"%matplotlib inline\n",
|
"%matplotlib inline\n",
|
||||||
"import matplotlib.pyplot as plt\n",
|
"import matplotlib.pyplot as plt\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"from sklearn.decomposition import PCA"
|
"from sklearn.decomposition import PCA\n",
|
||||||
]
|
"import seaborn as sns\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Seaborn pip dependency\n",
|
|
||||||
"import seaborn as sns"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Interact\n",
|
|
||||||
"# https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html\n",
|
|
||||||
"from __future__ import print_function\n",
|
|
||||||
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
"import ipywidgets as widgets"
|
"import ipywidgets as widgets\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.display import SVG"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -146,8 +86,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Display Images\n",
|
"#ENCODING='APT'\n",
|
||||||
"from IPython.display import display, Image"
|
"#ENCODING='BPSK1k2' # Fail\n",
|
||||||
|
"#ENCODING='FSK9k6'\n",
|
||||||
|
"#ENCODING='FM'\n",
|
||||||
|
"ENCODING='GMSK2k4'\n",
|
||||||
|
"#ENCODING='GMSK4k8'\n",
|
||||||
|
"#ENCODING='USB'"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -156,18 +101,20 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Python import done\")"
|
"#batch_size = 8\n",
|
||||||
]
|
"#atch_size = 16\n",
|
||||||
},
|
"#atch_size = 32\n",
|
||||||
{
|
"batch_size = 64\n",
|
||||||
"cell_type": "code",
|
"#batch_size = 128\n",
|
||||||
"execution_count": null,
|
"#batch_size = 256\n",
|
||||||
"metadata": {},
|
"#epochs = 4\n",
|
||||||
"outputs": [],
|
"epochs = 8\n",
|
||||||
"source": [
|
"#IMG_WIDTH = 208\n",
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
"#IMG_HEIGHT = 402\n",
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
"IMG_WIDTH = 416\n",
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"IMG_HEIGHT = 803\n",
|
||||||
|
"#IMG_WIDTH = 823\n",
|
||||||
|
"#IMG_HEIGHT = 1603"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -176,55 +123,16 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
"train_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'train')\n",
|
||||||
|
"val_dir = os.path.join('/srv/satnogs/data/txmodes', ENCODING, 'val')\n",
|
||||||
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
"train_good_dir = os.path.join(train_dir, 'good')\n",
|
||||||
"train_bad_dir = os.path.join(train_dir, 'bad')"
|
"train_bad_dir = os.path.join(train_dir, 'bad')\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
"val_good_dir = os.path.join(val_dir, 'good')\n",
|
||||||
"val_bad_dir = os.path.join(val_dir, 'bad')"
|
"val_bad_dir = os.path.join(val_dir, 'bad')\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_train_good = len(os.listdir(train_good_dir))\n",
|
"num_train_good = len(os.listdir(train_good_dir))\n",
|
||||||
"num_train_bad = len(os.listdir(train_bad_dir))"
|
"num_train_bad = len(os.listdir(train_bad_dir))\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_val_good = len(os.listdir(val_good_dir))\n",
|
"num_val_good = len(os.listdir(val_good_dir))\n",
|
||||||
"num_val_bad = len(os.listdir(val_bad_dir))"
|
"num_val_bad = len(os.listdir(val_bad_dir))\n",
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"num_test = len(os.listdir(test_dir))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"total_train = num_train_good + num_train_bad\n",
|
"total_train = num_train_good + num_train_bad\n",
|
||||||
"total_val = num_val_good + num_val_bad"
|
"total_val = num_val_good + num_val_bad"
|
||||||
]
|
]
|
||||||
|
@ -235,10 +143,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print('total training good images:', num_train_good)\n",
|
"print('Training good images: ', num_train_good)\n",
|
||||||
"print('total training bad images:', num_train_bad)\n",
|
"print('Training bad images: ', num_train_bad)\n",
|
||||||
"print(\"--\")\n",
|
"print('Training images: ', total_train)\n",
|
||||||
"print(\"Total training images:\", total_train)"
|
"print('Validation good images: ', num_val_good)\n",
|
||||||
|
"print('Validation bad images: ', num_val_bad)\n",
|
||||||
|
"print('Validation images: ', total_val)\n",
|
||||||
|
"print('')\n",
|
||||||
|
"#print('Reduce training and validation set')\n",
|
||||||
|
"#total_train = 1000\n",
|
||||||
|
"#total_val = 1000\n",
|
||||||
|
"print('Training reduced to: ', total_train)\n",
|
||||||
|
"print('Validation reduced to: ', total_val)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -247,10 +163,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print('total validation good images:', num_val_good)\n",
|
"train_image_generator = ImageDataGenerator( rescale=1./255 )"
|
||||||
"print('total validation bad images:', num_val_bad)\n",
|
|
||||||
"print(\"--\")\n",
|
|
||||||
"print(\"Total validation images:\", total_val)"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -259,81 +172,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"Reduce training and validation set when testing\")\n",
|
"val_image_generator = ImageDataGenerator( rescale=1./255 )"
|
||||||
"#total_train = 100\n",
|
|
||||||
"#total_val = 100\n",
|
|
||||||
"print(\"Train =\")\n",
|
|
||||||
"print(total_train)\n",
|
|
||||||
"print(\"Validation =\")\n",
|
|
||||||
"print(total_val)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Good results\n",
|
|
||||||
"batch_size = 128\n",
|
|
||||||
"epochs = 6\n",
|
|
||||||
"#\n",
|
|
||||||
"# Large Test\n",
|
|
||||||
"#batch_size = 512 # FAIL\n",
|
|
||||||
"#batch_size = 256 # FAIL\n",
|
|
||||||
"#batch_size = 192 # BEST SO FAR\n",
|
|
||||||
"#epochs = 16 # BEST SO FAR\n",
|
|
||||||
"#\n",
|
|
||||||
"# Fast, but reasonable answers\n",
|
|
||||||
"#batch_size = 128\n",
|
|
||||||
"#epochs = 4\n",
|
|
||||||
"# Faster, but reasonable answers ?\n",
|
|
||||||
"#batch_size = 32\n",
|
|
||||||
"#epochs = 2\n",
|
|
||||||
"#\n",
|
|
||||||
"# Testing, faster more inaccurate results\n",
|
|
||||||
"#batch_size = 16\n",
|
|
||||||
"#epochs = 2\n",
|
|
||||||
"#\n",
|
|
||||||
"# Smallest set for testing\n",
|
|
||||||
"#batch_size = 1\n",
|
|
||||||
"#epochs = 1"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"# Half size\n",
|
|
||||||
"IMG_HEIGHT = 416\n",
|
|
||||||
"IMG_WIDTH= 804\n",
|
|
||||||
"# Full size, machine barfs probably needs more RAM\n",
|
|
||||||
"#IMG_HEIGHT = 832\n",
|
|
||||||
"#IMG_WIDTH = 1606"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"train_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"val_image_generator = ImageDataGenerator(\n",
|
|
||||||
" rescale=1./255\n",
|
|
||||||
")"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -385,7 +224,6 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.\n",
|
|
||||||
"def plotImages(images_arr):\n",
|
"def plotImages(images_arr):\n",
|
||||||
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
" fig, axes = plt.subplots(1, 3, figsize=(20,20))\n",
|
||||||
" axes = axes.flatten()\n",
|
" axes = axes.flatten()\n",
|
||||||
|
@ -402,7 +240,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#plotImages(sample_train_images[0:3])"
|
"plotImages(sample_train_images[0:3])"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -411,7 +249,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"#plotImages(sample_val_images[0:3])"
|
"plotImages(sample_val_images[0:3])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# If you need to kill tensorboad, when it says stuff like this:\n",
|
||||||
|
"# Reusing TensorBoard on port 6006 (pid 13650), started 0:04:20 ago. (Use '!kill 13650' to kill it.)\n",
|
||||||
|
"#!rm -rf /tmp/.tensorboard-info/"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -422,11 +271,18 @@
|
||||||
"source": [
|
"source": [
|
||||||
"%load_ext tensorboard\n",
|
"%load_ext tensorboard\n",
|
||||||
"!rm -rf ./logs/\n",
|
"!rm -rf ./logs/\n",
|
||||||
"#os.mkdir(\"logs\")\n",
|
"os.mkdir(\"logs\")\n",
|
||||||
"log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n",
|
"log_dir = \"logs\"\n",
|
||||||
"tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
|
"#log_dir=\"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")"
|
||||||
"#logdir = \"logs\"\n",
|
]
|
||||||
"#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=logdir)"
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=1, update_freq='batch')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -454,9 +310,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.compile(optimizer='adam',\n",
|
"wutoptimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=True)\n",
|
||||||
" loss='binary_crossentropy',\n",
|
"wutloss = 'binary_crossentropy'\n",
|
||||||
" metrics=['accuracy'])"
|
"wutmetrics = ['accuracy']"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -465,7 +321,18 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.summary()"
|
"model.compile(optimizer=wutoptimizer,\n",
|
||||||
|
" loss=wutloss,\n",
|
||||||
|
" metrics=[wutmetrics])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#model.summary()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -483,7 +350,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"%tensorboard --logdir logs/fit"
|
"%tensorboard --logdir logs"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -492,14 +359,30 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"history = model.fit_generator(\n",
|
"print(train_data_gen)\n",
|
||||||
|
"print(total_train)\n",
|
||||||
|
"print(batch_size)\n",
|
||||||
|
"print(epochs)\n",
|
||||||
|
"print(val_data_gen)\n",
|
||||||
|
"print(total_val)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"history = model.fit(\n",
|
||||||
" train_data_gen,\n",
|
" train_data_gen,\n",
|
||||||
" steps_per_epoch=total_train // batch_size,\n",
|
" steps_per_epoch=total_train // batch_size,\n",
|
||||||
" epochs=epochs,\n",
|
" epochs=epochs,\n",
|
||||||
|
" verbose=1,\n",
|
||||||
" validation_data=val_data_gen,\n",
|
" validation_data=val_data_gen,\n",
|
||||||
" validation_steps=total_val // batch_size,\n",
|
" validation_steps=total_val // batch_size,\n",
|
||||||
|
" shuffle=True,\n",
|
||||||
" callbacks=[tensorboard_callback],\n",
|
" callbacks=[tensorboard_callback],\n",
|
||||||
" verbose=1\n",
|
" use_multiprocessing=False\n",
|
||||||
")"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -509,14 +392,37 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"acc = history.history['accuracy']\n",
|
"acc = history.history['accuracy']"
|
||||||
"val_acc = history.history['val_accuracy']\n",
|
]
|
||||||
"\n",
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"val_acc = history.history['val_accuracy']"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
"loss = history.history['loss']\n",
|
"loss = history.history['loss']\n",
|
||||||
"val_loss = history.history['val_loss']\n",
|
"val_loss = history.history['val_loss']\n",
|
||||||
"\n",
|
"\n",
|
||||||
"epochs_range = range(epochs)\n",
|
"epochs_range = range(epochs)\n",
|
||||||
"\n",
|
"\n",
|
||||||
|
"save_plot_dir = os.path.join('/srv/satnogs/data/models/', ENCODING)\n",
|
||||||
|
"os.makedirs(save_plot_dir, exist_ok=True)\n",
|
||||||
|
"plot_file=(\"wut-plot-\" + ENCODING + \".png\")\n",
|
||||||
|
"save_path_plot = os.path.join(save_plot_dir, plot_file)\n",
|
||||||
|
"print(save_path_plot)\n",
|
||||||
|
"\n",
|
||||||
"plt.figure(figsize=(8, 8))\n",
|
"plt.figure(figsize=(8, 8))\n",
|
||||||
"plt.subplot(1, 2, 1)\n",
|
"plt.subplot(1, 2, 1)\n",
|
||||||
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
"plt.plot(epochs_range, acc, label='Training Accuracy')\n",
|
||||||
|
@ -529,6 +435,7 @@
|
||||||
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
"plt.plot(epochs_range, val_loss, label='Validation Loss')\n",
|
||||||
"plt.legend(loc='upper right')\n",
|
"plt.legend(loc='upper right')\n",
|
||||||
"plt.title('Training and Validation Loss')\n",
|
"plt.title('Training and Validation Loss')\n",
|
||||||
|
"plt.savefig(save_path_plot)\n",
|
||||||
"plt.show()"
|
"plt.show()"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -538,61 +445,13 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"print(\"TRAINING info\")"
|
"print(\"TRAINING info\")\n",
|
||||||
]
|
"print(train_dir)\n",
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_dir)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_good_dir)\n",
|
"print(train_good_dir)\n",
|
||||||
"print(train_bad_dir)"
|
"print(train_bad_dir)\n",
|
||||||
]
|
"print(train_image_generator)\n",
|
||||||
},
|
"print(train_data_gen)\n",
|
||||||
{
|
"#print(sample_train_images)\n",
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_image_generator)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(train_data_gen)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"#print(sample_train_images)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"print(history)"
|
"print(history)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -602,7 +461,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# Save data here"
|
"h5_file=(\"wut-\" + ENCODING + \".h5\")\n",
|
||||||
|
"save_path_h5 = os.path.join('/srv/satnogs/data/models/', ENCODING, h5_file)\n",
|
||||||
|
"print(save_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -611,7 +472,7 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.save('data/models/wut-DUV.h5')"
|
"model.save(save_path_h5)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -620,7 +481,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.save('data/models/wut-DUV.tf')"
|
"tf_modeldir=(\"wut-\" + ENCODING + \".tf\")\n",
|
||||||
|
"save_path_tf = os.path.join('/srv/satnogs/data/models/', ENCODING, tf_modeldir)\n",
|
||||||
|
"print(save_path_tf)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -629,13 +492,38 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"# The End"
|
"model.save(save_path_tf)"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#plot_model(model, show_shapes=True, show_layer_names=True, expand_nested=True, dpi=72, to_file='/srv/satnogs/data/models/FM/plot_model.png')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#SVG(model_to_dot(model).create(prog='dot', format='svg'))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -649,7 +537,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
|
@ -0,0 +1,388 @@
|
||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# wut-web-alpha --- What U Think? Web App: SatNOGS Observation AI, makes predictions. ALPHA.\n",
|
||||||
|
"#\n",
|
||||||
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
|
"#\n",
|
||||||
|
"# GPLv3+\n",
|
||||||
|
"\n",
|
||||||
|
"#from collections import defaultdict\n",
|
||||||
|
"#import PIL as pil"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import json\n",
|
||||||
|
"import random\n",
|
||||||
|
"import tempfile\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"import tensorflow as tf\n",
|
||||||
|
"import ipywidgets as wg\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from ipywidgets import HBox, Label\n",
|
||||||
|
"from ipywidgets import interact, interactive, fixed, interact_manual\n",
|
||||||
|
"from ipywidgets import Layout, Button, Box\n",
|
||||||
|
"from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider\n",
|
||||||
|
"from ipywidgets import AppLayout\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.utils import text\n",
|
||||||
|
"from PIL import Image as im\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def head_pic():\n",
|
||||||
|
" display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%HTML\n",
|
||||||
|
"<H1><B>wut? ALPHA DEVELOPMENT VERSION<B></H1>\n",
|
||||||
|
"Main site: <A HREF=\"https://wut.spacecruft.org/\">wut.spacecruft.org</A><BR>\n",
|
||||||
|
"Test site: <A HREF=\"https://wut-beta.spacecruft.org/\">wut-beta.spacecruft.org</A>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"IMG_HEIGHT = 416\n",
|
||||||
|
"IMG_WIDTH = 804\n",
|
||||||
|
"batch_size = 32\n",
|
||||||
|
"minobsid = 1292461\n",
|
||||||
|
"maxobsid = 1470525\n",
|
||||||
|
"#maxobsid = 1591638\n",
|
||||||
|
"base_dir = ('/srv/wut/data')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def site_intro():\n",
|
||||||
|
" print(\"wut? --- What U Think? SatNOGS Observation AI development version.\")\n",
|
||||||
|
" print(\"Source Code: https://spacecruft.org/spacecruft/satnogs-wut\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"#site_intro()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def gen_image(test_data_gen,test_dir):\n",
|
||||||
|
" test_image_gen = ImageDataGenerator(rescale=1./255);\n",
|
||||||
|
" test_data_gen = test_image_gen.flow_from_directory(batch_size=1,\n",
|
||||||
|
" directory=test_dir,\n",
|
||||||
|
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
||||||
|
" shuffle=True,\n",
|
||||||
|
" class_mode='binary')\n",
|
||||||
|
" return test_data_gen\n",
|
||||||
|
"# Get rid of, but %%capture fails: Found 1 images belonging to 1 classes."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def rm_image_tmp(test_dir):\n",
|
||||||
|
" #print('Not removed:', test_dir)\n",
|
||||||
|
" shutil.rmtree(test_dir)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def gen_image_tmp(obs_waterfalltmp, showwater):\n",
|
||||||
|
" tmp_dir = tempfile.mkdtemp()\n",
|
||||||
|
" test_dir = os.path.join(tmp_dir)\n",
|
||||||
|
" os.makedirs(test_dir + '/unvetted', exist_ok=True)\n",
|
||||||
|
" shutil.copy(obs_waterfalltmp, test_dir + '/unvetted/') \n",
|
||||||
|
" \n",
|
||||||
|
" img = im.open(obs_waterfalltmp).resize( (100,200))\n",
|
||||||
|
" if showwater == True:\n",
|
||||||
|
" display(img)\n",
|
||||||
|
"\n",
|
||||||
|
" return test_dir"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def obs_wutsay(test_data_gen, model):\n",
|
||||||
|
" prediction = model.predict(\n",
|
||||||
|
" x=test_data_gen,\n",
|
||||||
|
" verbose=0)\n",
|
||||||
|
" predictions=[]\n",
|
||||||
|
" prediction_bool = (prediction >0.8)\n",
|
||||||
|
" predictions = prediction_bool.astype(int)\n",
|
||||||
|
" \n",
|
||||||
|
" return prediction_bool"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_obs_dict(datObs):\n",
|
||||||
|
" obsjsonfile=('/srv/satnogs/download/' + format(datObs) + '/' + format(datObs) + '.json')\n",
|
||||||
|
" with open(obsjsonfile) as f:\n",
|
||||||
|
" content = f.read()\n",
|
||||||
|
" data = json.loads(content)\n",
|
||||||
|
" res = {x : data[x] for x in range(len(data))}\n",
|
||||||
|
" res2 = dict(enumerate(data))\n",
|
||||||
|
" obs_dict=(res2[0])\n",
|
||||||
|
" \n",
|
||||||
|
" return obs_dict"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_obs_var(var, datObs):\n",
|
||||||
|
" obs_dict=get_obs_dict(datObs);\n",
|
||||||
|
" obs_var=(obs_dict[(var)])\n",
|
||||||
|
" \n",
|
||||||
|
" return obs_var"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def wutObs(datObs, showwater):\n",
|
||||||
|
" if int(datObs) > ( minobsid - 1 ) and int(datObs) < ( maxobsid + 1):\n",
|
||||||
|
" doallthethings(datObs, showwater)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def doallthethings(datObs, showwater):\n",
|
||||||
|
"\n",
|
||||||
|
" obs_waterfall=get_obs_var('waterfall', datObs) \n",
|
||||||
|
" obs_waterfallpic=os.path.basename(obs_waterfall)\n",
|
||||||
|
" obs_waterfalltmp = os.path.join('/srv/satnogs/download', str(get_obs_var('id', datObs)), obs_waterfallpic)\n",
|
||||||
|
"\n",
|
||||||
|
"# XXX NameError\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" model=wut_model(txmode)\n",
|
||||||
|
" \n",
|
||||||
|
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" elif get_obs_var('transmitter_mode', datObs) == 'CW':\n",
|
||||||
|
" txmode='CW'\n",
|
||||||
|
" else:\n",
|
||||||
|
" txmode='DUV'\n",
|
||||||
|
" \n",
|
||||||
|
" test_dir=gen_image_tmp(obs_waterfalltmp, showwater);\n",
|
||||||
|
" test_data_gen=gen_image(obs_waterfalltmp, test_dir);\n",
|
||||||
|
"\n",
|
||||||
|
"# XXX NameError: name 'model' is not defined\n",
|
||||||
|
" prediction_bool=obs_wutsay(test_data_gen, model);\n",
|
||||||
|
"\n",
|
||||||
|
" print()\n",
|
||||||
|
" print('Observation ID: ', get_obs_var('id', datObs))\n",
|
||||||
|
" print('Encoding: ', get_obs_var('transmitter_mode', datObs),end='')\n",
|
||||||
|
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
||||||
|
" XXX=0\n",
|
||||||
|
" print(\" -- Using DUV training model.\")\n",
|
||||||
|
" else:\n",
|
||||||
|
" print(\" -- wut has not been trained on\", get_obs_var('transmitter_mode', datObs), \"encodings.\")\n",
|
||||||
|
" print('Human rating: ', get_obs_var('vetted_status', datObs))\n",
|
||||||
|
" if prediction_bool[0] == False:\n",
|
||||||
|
" rating = 'bad'\n",
|
||||||
|
" else:\n",
|
||||||
|
" rating = 'good'\n",
|
||||||
|
" print('wut AI rating: %s' % (rating)) \n",
|
||||||
|
" print()\n",
|
||||||
|
" print('https://network.satnogs.org/observations/' + str(get_obs_var('id', datObs)))\n",
|
||||||
|
" #!cat $obsjsonfile\n",
|
||||||
|
" rm_image_tmp(test_dir)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def wut_model(txmode):\n",
|
||||||
|
" model_file = os.path.join(base_dir, 'models', (txmode), 'wut-train-cluster.tf')\n",
|
||||||
|
" model = load_model(model_file)\n",
|
||||||
|
" \n",
|
||||||
|
" return model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def display_main():\n",
|
||||||
|
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
||||||
|
" rand_obsid=random.randint(minobsid,maxobsid)\n",
|
||||||
|
" wutObs_slide = wg.IntText(value=rand_obsid, description=' ')\n",
|
||||||
|
" wutObs_check = wg.Checkbox(value=True, disabled=False)\n",
|
||||||
|
" wg.interact(wutObs, datObs=wutObs_slide, showwater=wutObs_check.value)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"head_pic()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"site_intro()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def load_txmode_models():\n",
|
||||||
|
" model_txmode_DUV=wut_model('DUV')\n",
|
||||||
|
" model_txmode_CW=wut_model('CW')\n",
|
||||||
|
" model_txmode_other=wut_model('DUV')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"load_txmode_models()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display_main()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
|
@ -0,0 +1,344 @@
|
||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
||||||
|
"#\n",
|
||||||
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
|
"#\n",
|
||||||
|
"# GPLv3+\n",
|
||||||
|
"\n",
|
||||||
|
"#from collections import defaultdict\n",
|
||||||
|
"#import PIL as pil"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import json\n",
|
||||||
|
"import random\n",
|
||||||
|
"import tempfile\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"import tensorflow as tf\n",
|
||||||
|
"import ipywidgets as wg\n",
|
||||||
|
"import matplotlib.pyplot as plt\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.utils import text\n",
|
||||||
|
"from PIL import Image as im\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%HTML\n",
|
||||||
|
"<H1><B>wut? BETA TEST VERSION<B></H1>\n",
|
||||||
|
"Main site: <A HREF=\"https://wut.spacecruft.org/\">wut.spacecruft.org</A><BR>\n",
|
||||||
|
"Development site: <A HREF=\"https://wut-alpha.spacecruft.org/\">wut-alpha.spacecruft.org</A>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"IMG_HEIGHT = 416\n",
|
||||||
|
"IMG_WIDTH = 804\n",
|
||||||
|
"batch_size = 32\n",
|
||||||
|
"minobsid = 1292461\n",
|
||||||
|
"maxobsid = 1470525\n",
|
||||||
|
"base_dir = ('/srv/wut/data')\n",
|
||||||
|
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
||||||
|
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def site_intro():\n",
|
||||||
|
" print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
||||||
|
" print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
||||||
|
" print(\"SatNOGS network in December, 2019.\")\n",
|
||||||
|
" print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
||||||
|
" print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
||||||
|
" print(\"\")\n",
|
||||||
|
" print(\"Source Code:\")\n",
|
||||||
|
" print(\"https://spacecruft.org/spacecruft/satnogs-wut\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def wut_model(model_file):\n",
|
||||||
|
" model = load_model(model_file)\n",
|
||||||
|
" \n",
|
||||||
|
" return model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def gen_image(test_data_gen,test_dir):\n",
|
||||||
|
" test_image_gen = ImageDataGenerator(rescale=1./255);\n",
|
||||||
|
" test_data_gen = test_image_gen.flow_from_directory(batch_size=1,\n",
|
||||||
|
" directory=test_dir,\n",
|
||||||
|
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
||||||
|
" shuffle=True,\n",
|
||||||
|
" class_mode='binary')\n",
|
||||||
|
" return test_data_gen"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def rm_image_tmp(test_dir):\n",
|
||||||
|
" #print('Not removed:', test_dir)\n",
|
||||||
|
" shutil.rmtree(test_dir)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture --no-stderr --no-stdout\n",
|
||||||
|
"def gen_image_tmp(obs_waterfalltmp):\n",
|
||||||
|
" tmp_dir = tempfile.mkdtemp()\n",
|
||||||
|
" test_dir = os.path.join(tmp_dir)\n",
|
||||||
|
" os.makedirs(test_dir + '/unvetted', exist_ok=True)\n",
|
||||||
|
" shutil.copy(obs_waterfalltmp, test_dir + '/unvetted/') \n",
|
||||||
|
" \n",
|
||||||
|
" img = im.open(obs_waterfalltmp).resize( (100,200))\n",
|
||||||
|
" display(img)\n",
|
||||||
|
"\n",
|
||||||
|
" return test_dir"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def obs_wutsay(test_data_gen):\n",
|
||||||
|
" prediction = model.predict(\n",
|
||||||
|
" x=test_data_gen,\n",
|
||||||
|
" verbose=0)\n",
|
||||||
|
" predictions=[]\n",
|
||||||
|
" prediction_bool = (prediction >0.8)\n",
|
||||||
|
" predictions = prediction_bool.astype(int)\n",
|
||||||
|
" \n",
|
||||||
|
" return prediction_bool"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_obs_dict(datObs):\n",
|
||||||
|
" obsjsonfile=('/srv/satnogs/download/' + format(datObs) + '/' + format(datObs) + '.json')\n",
|
||||||
|
" with open(obsjsonfile) as f:\n",
|
||||||
|
" content = f.read()\n",
|
||||||
|
" data = json.loads(content)\n",
|
||||||
|
" res = {x : data[x] for x in range(len(data))}\n",
|
||||||
|
" res2 = dict(enumerate(data))\n",
|
||||||
|
" obs_dict=(res2[0])\n",
|
||||||
|
" \n",
|
||||||
|
" return obs_dict"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_obs_var(var, datObs):\n",
|
||||||
|
" obs_dict=get_obs_dict(datObs);\n",
|
||||||
|
" obs_var=(obs_dict[(var)])\n",
|
||||||
|
" \n",
|
||||||
|
" return obs_var"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def doallthethings(datObs):\n",
|
||||||
|
"\n",
|
||||||
|
" obs_waterfall=get_obs_var('waterfall', datObs) \n",
|
||||||
|
" obs_waterfallpic=os.path.basename(obs_waterfall)\n",
|
||||||
|
" obs_waterfalltmp = os.path.join('/srv/satnogs/download', str(get_obs_var('id', datObs)), obs_waterfallpic)\n",
|
||||||
|
"\n",
|
||||||
|
" test_dir=gen_image_tmp(obs_waterfalltmp);\n",
|
||||||
|
" test_data_gen=gen_image(obs_waterfalltmp, test_dir);\n",
|
||||||
|
" \n",
|
||||||
|
" prediction_bool=obs_wutsay(test_data_gen);\n",
|
||||||
|
"\n",
|
||||||
|
" print()\n",
|
||||||
|
" print('Observation ID: ', get_obs_var('id', datObs))\n",
|
||||||
|
" print('Encoding: ', get_obs_var('transmitter_mode', datObs))\n",
|
||||||
|
" print('Human rating: ', get_obs_var('vetted_status', datObs))\n",
|
||||||
|
" if prediction_bool[0] == False:\n",
|
||||||
|
" rating = 'bad'\n",
|
||||||
|
" else:\n",
|
||||||
|
" rating = 'good'\n",
|
||||||
|
" print('wut AI rating: %s' % (rating)) \n",
|
||||||
|
" print()\n",
|
||||||
|
" if get_obs_var('transmitter_mode', datObs) == 'DUV':\n",
|
||||||
|
" print(\"Using DUV training model.\")\n",
|
||||||
|
" else:\n",
|
||||||
|
" print(\"NOTE: wut has not been trained on\", get_obs_var('transmitter_mode', datObs), \"encodings.\")\n",
|
||||||
|
" print('https://network.satnogs.org/observations/' + str(get_obs_var('id', datObs)))\n",
|
||||||
|
" #!cat $obsjsonfile\n",
|
||||||
|
" rm_image_tmp(test_dir)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def wutObs(datObs):\n",
|
||||||
|
" if int(datObs) > ( minobsid - 1 ) and int(datObs) < ( maxobsid + 1):\n",
|
||||||
|
" doallthethings(datObs)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"def display_main():\n",
|
||||||
|
" print('Enter an Observation ID between', minobsid, 'and', maxobsid)\n",
|
||||||
|
" rand_obsid=random.randint(minobsid,maxobsid)\n",
|
||||||
|
" wutObs_slide = wg.IntText(value=rand_obsid)\n",
|
||||||
|
" wg.interact(wutObs, datObs=wutObs_slide)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"site_intro()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model=wut_model(model_file)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display_main()"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
|
@ -0,0 +1,224 @@
|
||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# wut-web --- What U Think? Web App: SatNOGS Observation AI, makes predictions.\n",
|
||||||
|
"#\n",
|
||||||
|
"# https://spacecruft.org/spacecruft/satnogs-wut\n",
|
||||||
|
"#\n",
|
||||||
|
"# GPLv3+"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import random\n",
|
||||||
|
"import tempfile\n",
|
||||||
|
"import shutil\n",
|
||||||
|
"import tensorflow as tf\n",
|
||||||
|
"from tensorflow.keras.models import load_model\n",
|
||||||
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
|
"from IPython.display import display, Image\n",
|
||||||
|
"from IPython.utils import text"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"tf.get_logger().setLevel('ERROR')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display(Image(filename='/srv/satnogs/satnogs-wut/pics/spacecruft-bk.png'))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%HTML\n",
|
||||||
|
"<H1><B>wut?<B></H1>"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(\"wut? --- What U Think? SatNOGS Observation AI.\")\n",
|
||||||
|
"print(\"\")\n",
|
||||||
|
"print(\"wut is an AI that rates SatNOGS Observations good or bad.\")\n",
|
||||||
|
"print(\"The training model was built from DUV transmissions recorded by the\")\n",
|
||||||
|
"print(\"SatNOGS network in December, 2019.\")\n",
|
||||||
|
"print(\"When the page loads, the AI processes a random image and rates it good or bad.\")\n",
|
||||||
|
"print(\"The test pool has 500+ DUV waterfalls the AI hasn't seen before.\")\n",
|
||||||
|
"print(\"The plan is to have models of all SatNOGS modes (65 at present),\")\n",
|
||||||
|
"print(\"and you can enter an arbitrary Observation ID and the AI will return a rating.\")\n",
|
||||||
|
"print(\"\")\n",
|
||||||
|
"print(\"Source Code:\")\n",
|
||||||
|
"print(\"https://spacecruft.org/spacecruft/satnogs-wut\")\n",
|
||||||
|
"print(\"Alpha stage.\")\n",
|
||||||
|
"\n",
|
||||||
|
"IMG_HEIGHT = 416\n",
|
||||||
|
"IMG_WIDTH= 804\n",
|
||||||
|
"batch_size = 32\n",
|
||||||
|
"base_dir = ('/srv/wut/data')\n",
|
||||||
|
"sample_dir = ('/srv/wut/data/test/unvetted')\n",
|
||||||
|
"model_file = os.path.join(base_dir, 'models', 'wut-DUV-201912.tf')\n",
|
||||||
|
"tmp_dir = tempfile.mkdtemp()\n",
|
||||||
|
"test_dir = os.path.join(tmp_dir)\n",
|
||||||
|
"os.makedirs(test_dir + '/unvetted', exist_ok=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"n=0\n",
|
||||||
|
"random.seed();\n",
|
||||||
|
"for root, dirs, files in os.walk(sample_dir):\n",
|
||||||
|
" for name in files:\n",
|
||||||
|
" n=n+1\n",
|
||||||
|
" if random.uniform(0, n) < 1: rfile=os.path.join(root, name)\n",
|
||||||
|
"\n",
|
||||||
|
"shutil.copy(rfile, test_dir + '/unvetted/')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"model = load_model(model_file)\n",
|
||||||
|
"\n",
|
||||||
|
"test_image_generator = ImageDataGenerator(\n",
|
||||||
|
" rescale=1./255\n",
|
||||||
|
")\n",
|
||||||
|
"test_data_gen = test_image_generator.flow_from_directory(batch_size=1,\n",
|
||||||
|
" directory=test_dir,\n",
|
||||||
|
" target_size=(IMG_HEIGHT, IMG_WIDTH),\n",
|
||||||
|
" shuffle=True,\n",
|
||||||
|
" class_mode='binary')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"prediction = model.predict(\n",
|
||||||
|
" x=test_data_gen,\n",
|
||||||
|
" verbose=0\n",
|
||||||
|
")\n",
|
||||||
|
"predictions=[]\n",
|
||||||
|
"prediction_bool = (prediction >0.8)\n",
|
||||||
|
"predictions = prediction_bool.astype(int)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"waterfallpng=os.path.basename(rfile)\n",
|
||||||
|
"print('Random waterfall:', waterfallpng)\n",
|
||||||
|
"f=text.EvalFormatter()\n",
|
||||||
|
"obsid=(f.format(\"{waterfall[slice(10,17)]}\", waterfall=waterfallpng))\n",
|
||||||
|
"print('Observation URL: https://network.satnogs.org/observations/{}'.format(obsid))\n",
|
||||||
|
"if prediction_bool[0] == False:\n",
|
||||||
|
" rating = 'BAD'\n",
|
||||||
|
"else:\n",
|
||||||
|
" rating = 'GOOD'\n",
|
||||||
|
"print('AI Observation rating: %s' % (rating))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%%capture\n",
|
||||||
|
"shutil.rmtree(test_dir)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display(Image(filename=rfile, width=300))"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.6"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
|
@ -63,6 +63,15 @@
|
||||||
"import os"
|
"import os"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": null,
|
"execution_count": null,
|
||||||
|
@ -78,43 +87,23 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import tensorflow.python.keras"
|
"import tensorflow as tf\n",
|
||||||
]
|
"from tensorflow import keras\n",
|
||||||
},
|
"from tensorflow.keras import layers\n",
|
||||||
{
|
"from tensorflow.keras import optimizers\n",
|
||||||
"cell_type": "code",
|
"from tensorflow.keras import Sequential\n",
|
||||||
"execution_count": null,
|
"from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
||||||
"metadata": {},
|
"from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
||||||
"outputs": [],
|
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n",
|
||||||
"source": [
|
"from tensorflow.keras.layers import Input, concatenate\n",
|
||||||
"from tensorflow.python.keras import Sequential\n",
|
"from tensorflow.keras.models import load_model\n",
|
||||||
"from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense\n",
|
"from tensorflow.keras.models import Model\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n",
|
"from tensorflow.keras.preprocessing import image\n",
|
||||||
"from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D\n",
|
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
|
||||||
"from tensorflow.python.keras import optimizers\n",
|
"from tensorflow.keras.preprocessing.image import img_to_array\n",
|
||||||
"from tensorflow.python.keras.preprocessing import image\n",
|
"from tensorflow.keras.preprocessing.image import load_img\n",
|
||||||
"from tensorflow.python.keras.models import load_model\n",
|
"from tensorflow.keras.utils import model_to_dot\n",
|
||||||
"from tensorflow.python.keras.preprocessing.image import load_img\n",
|
"from tensorflow.keras.utils import plot_model"
|
||||||
"from tensorflow.python.keras.preprocessing.image import img_to_array"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from tensorflow.python.keras.models import Model\n",
|
|
||||||
"from tensorflow.python.keras.layers import Input, concatenate"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -178,9 +167,9 @@
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"train_dir = os.path.join('data/', 'train')\n",
|
"train_dir = os.path.join('/srv/satnogs/data/', 'train')\n",
|
||||||
"val_dir = os.path.join('data/', 'val')\n",
|
"val_dir = os.path.join('/srv/satnogs/data/', 'val')\n",
|
||||||
"test_dir = os.path.join('data/', 'test')"
|
"test_dir = os.path.join('/srv/satnogs/data/', 'test')"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -752,7 +741,7 @@
|
||||||
],
|
],
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"kernelspec": {
|
"kernelspec": {
|
||||||
"display_name": "Python 3",
|
"display_name": "Python 3 (ipykernel)",
|
||||||
"language": "python",
|
"language": "python",
|
||||||
"name": "python3"
|
"name": "python3"
|
||||||
},
|
},
|
||||||
|
@ -766,7 +755,7 @@
|
||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.7.3"
|
"version": "3.10.6"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
Binary file not shown.
After Width: | Height: | Size: 10 KiB |
Binary file not shown.
After Width: | Height: | Size: 2.0 KiB |
|
@ -0,0 +1,90 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||||
|
<!-- Created with Inkscape (http://www.inkscape.org/) -->
|
||||||
|
|
||||||
|
<svg
|
||||||
|
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||||
|
xmlns:cc="http://creativecommons.org/ns#"
|
||||||
|
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||||
|
xmlns:svg="http://www.w3.org/2000/svg"
|
||||||
|
xmlns="http://www.w3.org/2000/svg"
|
||||||
|
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||||
|
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||||
|
width="80mm"
|
||||||
|
height="80.000015mm"
|
||||||
|
viewBox="0 0 80 80.000015"
|
||||||
|
version="1.1"
|
||||||
|
id="svg38"
|
||||||
|
inkscape:version="0.92.4 (5da689c313, 2019-01-14)"
|
||||||
|
sodipodi:docname="drawing.svg">
|
||||||
|
<defs
|
||||||
|
id="defs32" />
|
||||||
|
<sodipodi:namedview
|
||||||
|
id="base"
|
||||||
|
pagecolor="#ffffff"
|
||||||
|
bordercolor="#666666"
|
||||||
|
borderopacity="1.0"
|
||||||
|
inkscape:pageopacity="0.0"
|
||||||
|
inkscape:pageshadow="2"
|
||||||
|
inkscape:zoom="1"
|
||||||
|
inkscape:cx="28.999953"
|
||||||
|
inkscape:cy="168.31035"
|
||||||
|
inkscape:document-units="mm"
|
||||||
|
inkscape:current-layer="layer1"
|
||||||
|
showgrid="false"
|
||||||
|
inkscape:window-width="1920"
|
||||||
|
inkscape:window-height="1025"
|
||||||
|
inkscape:window-x="0"
|
||||||
|
inkscape:window-y="0"
|
||||||
|
inkscape:window-maximized="1"
|
||||||
|
fit-margin-top="0"
|
||||||
|
fit-margin-left="0"
|
||||||
|
fit-margin-right="0"
|
||||||
|
fit-margin-bottom="0" />
|
||||||
|
<metadata
|
||||||
|
id="metadata35">
|
||||||
|
<rdf:RDF>
|
||||||
|
<cc:Work
|
||||||
|
rdf:about="">
|
||||||
|
<dc:format>image/svg+xml</dc:format>
|
||||||
|
<dc:type
|
||||||
|
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||||
|
<dc:title></dc:title>
|
||||||
|
</cc:Work>
|
||||||
|
</rdf:RDF>
|
||||||
|
</metadata>
|
||||||
|
<g
|
||||||
|
inkscape:label="Layer 1"
|
||||||
|
inkscape:groupmode="layer"
|
||||||
|
id="layer1"
|
||||||
|
transform="translate(-1.0680306e-5,-216.99998)">
|
||||||
|
<g
|
||||||
|
id="g936"
|
||||||
|
transform="matrix(5.2131426,0,0,5.2131426,-694.38438,-220.76129)">
|
||||||
|
<path
|
||||||
|
inkscape:connector-curvature="0"
|
||||||
|
id="path907"
|
||||||
|
d="m 133.78684,98.906581 c -0.58181,-0.407517 -0.58803,-0.484114 -0.58803,-7.235034 0,-6.274145 0.0389,-6.85839 0.48385,-7.261039 0.42372,-0.383464 1.32609,-0.437882 7.26104,-0.437882 6.70341,0 6.78167,0.0064 7.18906,0.588038 0.34625,0.494337 0.41188,1.647203 0.41188,7.235035 0,6.103483 -0.0396,6.682801 -0.48385,7.084878 -0.42302,0.382824 -1.31386,0.437882 -7.08488,0.437882 -5.54623,0 -6.69499,-0.06582 -7.18907,-0.411878 z"
|
||||||
|
style="fill:#000000;stroke-width:0.26458335" />
|
||||||
|
<path
|
||||||
|
inkscape:connector-curvature="0"
|
||||||
|
id="path905"
|
||||||
|
d="m 147.69797,98.471793 c 0.22984,-0.229836 0.3175,-2.125092 0.3175,-6.864164 0,-6.465201 -0.007,-6.551789 -0.58804,-6.958542 -0.49133,-0.344144 -1.60086,-0.411878 -6.74687,-0.411878 -5.80606,0 -6.18915,0.03031 -6.688,0.529167 -0.49886,0.498856 -0.52917,0.881944 -0.52917,6.688003 0,5.146009 0.0677,6.255541 0.41188,6.746876 0.40675,0.58072 0.49334,0.588038 6.95854,0.588038 4.73907,0 6.63433,-0.08767 6.86416,-0.3175 z"
|
||||||
|
style="fill:#caccc9;fill-opacity:1;stroke-width:0.26458335" />
|
||||||
|
<path
|
||||||
|
inkscape:connector-curvature="0"
|
||||||
|
id="path903"
|
||||||
|
d="m 134.31841,91.579397 0.071,-6.151563 6.37504,-0.03865 6.37505,-0.03864 0.17003,5.594895 c 0.0935,3.077193 0.0981,5.862787 0.0102,6.190209 L 147.16,97.73096 h -6.45631 -6.45631 z"
|
||||||
|
style="fill:#000000;stroke-width:0.26458335" />
|
||||||
|
<path
|
||||||
|
inkscape:connector-curvature="0"
|
||||||
|
id="path901"
|
||||||
|
d="m 146.95714,91.645547 v -5.820834 h -6.08542 -6.08541 v 5.820834 5.820833 h 6.08541 6.08542 z"
|
||||||
|
style="fill:#a7aad5;fill-opacity:1;stroke-width:0.26458335" />
|
||||||
|
<path
|
||||||
|
inkscape:connector-curvature="0"
|
||||||
|
id="path897"
|
||||||
|
d="m 137.16756,96.154016 c 0,-0.28252 0.13175,-1.128018 0.29278,-1.878883 l 0.29279,-1.365209 -1.3812,-1.398564 c -1.55267,-1.572202 -1.5667,-1.548981 1.12714,-1.865054 l 1.52214,-0.178595 0.82983,-1.556915 c 0.4564,-0.856304 0.91041,-1.556916 1.0089,-1.556916 0.0985,0 0.5199,0.713107 0.93648,1.584683 0.41657,0.871576 0.94448,1.637283 1.17313,1.701571 0.22864,0.06429 1.12855,0.25845 1.9998,0.431472 l 1.58408,0.314586 -1.49911,1.357281 -1.49912,1.357282 0.26746,1.676045 c 0.1471,0.921824 0.20793,1.735574 0.13517,1.808332 -0.0728,0.07276 -0.84847,-0.264454 -1.72382,-0.749359 l -1.59153,-0.881647 -1.73746,0.856782 c -1.60218,0.790068 -1.73746,0.816784 -1.73746,0.343108 z"
|
||||||
|
style="fill:#000000;stroke-width:0.26458335" />
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
After Width: | Height: | Size: 4.3 KiB |
Binary file not shown.
After Width: | Height: | Size: 789 KiB |
|
@ -0,0 +1,9 @@
|
||||||
|
opencv-python
|
||||||
|
pandas
|
||||||
|
pillow
|
||||||
|
portpicker
|
||||||
|
setuptools
|
||||||
|
simplejson
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow
|
||||||
|
#tensorflow_gpu
|
|
@ -0,0 +1,15 @@
|
||||||
|
ipython_blocking
|
||||||
|
ipywidgets
|
||||||
|
jupyterlab
|
||||||
|
matplotlib
|
||||||
|
pandas
|
||||||
|
pillow
|
||||||
|
pydot
|
||||||
|
seaborn
|
||||||
|
simplejson
|
||||||
|
sklearn
|
||||||
|
voila
|
||||||
|
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow
|
||||||
|
#tensorflow_gpu
|
|
@ -0,0 +1,15 @@
|
||||||
|
black[jupyter]
|
||||||
|
internetarchive
|
||||||
|
ipywidgets
|
||||||
|
jupyterlab
|
||||||
|
matplotlib
|
||||||
|
pandas
|
||||||
|
pydot
|
||||||
|
seaborn
|
||||||
|
sklearn
|
||||||
|
tensorboard
|
||||||
|
tensorboard-plugin-profile
|
||||||
|
|
||||||
|
tensorflow_cpu
|
||||||
|
#tensorflow_gpu
|
||||||
|
#tensorflow
|
|
@ -0,0 +1,10 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
jupyter-lab \
|
||||||
|
--config=/home/jebba/.jupyter/jupyter_notebook_config.py \
|
||||||
|
--notebook-dir=/home/jebba/devel/spacecruft/satnogs-wut/notebooks \
|
||||||
|
--app-dir=/home/jebba/.local/share/jupyter/lab \
|
||||||
|
1>>/home/jebba/log/jupyter.log 2>>/home/jebba/log/jupyter.err &
|
||||||
|
|
||||||
|
# --debug \
|
||||||
|
# --notebook-dir=/srv/satnogs/satnogs-wut/notebooks \
|
|
@ -0,0 +1,7 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# do better
|
||||||
|
killall jupyter-lab && echo "killed"
|
||||||
|
sleep 2
|
||||||
|
killall jupyter-lab && echo "again"
|
||||||
|
ps ax|grep /home/jebba/.local/bin/jupyter-lab | grep -v -e grep -e "/home/jebba/bin/spacecruft-jupyter-stop"
|
||||||
|
|
|
@ -0,0 +1,16 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# spacecruft-mount-cephfs
|
||||||
|
|
||||||
|
# Kernel
|
||||||
|
#sudo mount -t ceph 10.99.99.7:6789:/satnogs -o rw,name=ml,secret="`cat /home/jebba/.cephkey`" /srv/satnogs
|
||||||
|
|
||||||
|
# FUSE
|
||||||
|
# In theory should be newer/better that the kernel module.
|
||||||
|
sudo ceph-fuse \
|
||||||
|
--name=client.`hostname` \
|
||||||
|
--conf=/etc/ceph/ceph.conf \
|
||||||
|
--client_mountpoint /satnogs \
|
||||||
|
/srv/satnogs/
|
||||||
|
|
||||||
|
df -h /srv/satnogs
|
||||||
|
|
|
@ -0,0 +1,77 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# spacecruft-voila-start
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
cd /srv/satnogs/satnogs-wut/notebooks/
|
||||||
|
|
||||||
|
voila \
|
||||||
|
--ExecutePreprocessor.timeout=600 \
|
||||||
|
--no-browser \
|
||||||
|
--port=8867 \
|
||||||
|
--theme=dark \
|
||||||
|
--autoreload=True \
|
||||||
|
--template=default \
|
||||||
|
--Voila.ip=localhost \
|
||||||
|
--VoilaConfiguration.enable_nbextensions=False \
|
||||||
|
wut-web.ipynb \
|
||||||
|
1>>~/log/voila.log 2>>~/log/voila.err &
|
||||||
|
exit
|
||||||
|
|
||||||
|
--debug \
|
||||||
|
--strip_sources=False \
|
||||||
|
|
||||||
|
--base_url=<Unicode> (Voila.base_url)
|
||||||
|
Default: '/'
|
||||||
|
Path for voila API calls. If server_url is unset, this will be
|
||||||
|
used for both the base route of the server and the client. If
|
||||||
|
server_url is set, the server will server the routes prefixed by
|
||||||
|
server_url, while the client will prefix by base_url (this is
|
||||||
|
useful in reverse proxies).
|
||||||
|
--server_url=<Unicode> (Voila.server_url)
|
||||||
|
Default: None
|
||||||
|
Path to prefix to voila API handlers. Leave unset to default to base_url
|
||||||
|
--Voila.config_file_paths=<List>
|
||||||
|
Default: []
|
||||||
|
Paths to search for voila.(py|json)
|
||||||
|
--Voila.connection_dir_root=<Unicode>
|
||||||
|
Default: ''
|
||||||
|
Location of temporry connection files. Defaults to system
|
||||||
|
`tempfile.gettempdir()` value.
|
||||||
|
--Voila.custom_display_url=<Unicode>
|
||||||
|
Default: ''
|
||||||
|
Override URL shown to users. Replace actual URL, including protocol,
|
||||||
|
address, port and base URL, with the given value when displaying URL to the
|
||||||
|
users. Do not change the actual connection URL. If authentication token is
|
||||||
|
enabled, the token is added to the custom URL automatically. This option is
|
||||||
|
intended to be used when the URL to display to the user cannot be determined
|
||||||
|
reliably by the Jupyter notebook server (proxified or containerized setups
|
||||||
|
for example).
|
||||||
|
--Voila.log_level=<Enum>
|
||||||
|
Default: 30
|
||||||
|
Choices: (0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL')
|
||||||
|
Set the log level by value or name.
|
||||||
|
--Voila.root_dir=<Unicode>
|
||||||
|
Default: ''
|
||||||
|
The directory to use for notebooks.
|
||||||
|
|
||||||
|
--Voila.server_url=<Unicode>
|
||||||
|
Default: None
|
||||||
|
Path to prefix to voila API handlers. Leave unset to default to base_url
|
||||||
|
--Voila.static_paths=<List>
|
||||||
|
Default: ['/home/jebba/.local/lib/python3.7/site-packages/voila/static']
|
||||||
|
paths to static assets
|
||||||
|
--Voila.static_root=<Unicode>
|
||||||
|
Default: '/home/jebba/.local/lib/python3.7/site-packages/voila/static'
|
||||||
|
Directory holding static assets (HTML, JS and CSS files).
|
||||||
|
--Voila.template_paths=<List>
|
||||||
|
Default: []
|
||||||
|
path to nbconvert templates
|
||||||
|
--VoilaConfiguration.file_whitelist=['.*\\.(png|jpg|gif|svg)']
|
||||||
|
--VoilaConfiguration.resources=<Dict>
|
||||||
|
Default: {}
|
||||||
|
extra resources used by templates; example use with --template=reveal
|
||||||
|
--VoilaConfiguration.resources="{'reveal': {'transition': 'fade', 'scroll':
|
||||||
|
True}}"
|
||||||
|
|
|
@ -0,0 +1,11 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
killall voila
|
||||||
|
|
||||||
|
sleep 2
|
||||||
|
ps ax|grep voila | grep -v grep
|
||||||
|
sleep 3
|
||||||
|
ps ax|grep voila | grep -v grep
|
||||||
|
|
|
@ -0,0 +1,7 @@
|
||||||
|
all:
|
||||||
|
mkdir -p ../bin
|
||||||
|
cp -p wut wut-aria-active wut-aria-add wut-aria-daemon wut-aria-info wut-aria-methods wut-aria-shutdown wut-aria-stat wut-aria-stopped wut-aria-waiting wut-audio-archive wut-audio-sha1 wut-compare wut-compare-all wut-compare-tx wut-compare-txmode wut-compare-txmode-csv wut-dl-sort wut-dl-sort-tx wut-dl-sort-txmode wut-dl-sort-txmode-all wut-files wut-files-data wut-files-data-all wut-ia-sha1 wut-ia-torrents wut-img-ck.py wut-ml wut-ml-auto wut-ml-load wut-ml-save wut-obs wut-ogg2wav wut-review-staging wut-rm-random wut-tf wut-tf.py wut-water wut-water-range wut-worker wut-worker-mas wut-worker-mas.py wut-worker.py ../bin/
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -fr ../bin
|
||||||
|
|
|
@ -9,15 +9,17 @@
|
||||||
# Example:
|
# Example:
|
||||||
# wut 1456893
|
# wut 1456893
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
OBSID="$1"
|
OBSID="$1"
|
||||||
|
|
||||||
rm -rf data/test
|
rm -rf data/test
|
||||||
mkdir -p data/test/unvetted
|
mkdir -p data/test/unvetted
|
||||||
|
|
||||||
./wut-water $OBSID
|
wut-water $OBSID
|
||||||
|
|
||||||
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || echo "failed"
|
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || echo "failed"
|
||||||
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || exit
|
[ -f download/$OBSID/waterfall_$OBSID_*.png ] || exit
|
||||||
cp -p download/$OBSID/waterfall_$OBSID_*.png data/test/unvetted/
|
cp -p download/$OBSID/waterfall_$OBSID_*.png data/test/unvetted/
|
||||||
./wut-ml 2>/dev/null | grep -e ^Observation -e "^\[\[" | sed -e 's/\[\[//' -e 's/\]\]//' -e 's/Observation: //g'
|
wut-ml 2>/dev/null | grep -e ^Observation -e "^\[\[" | sed -e 's/\[\[//' -e 's/\]\]//' -e 's/Observation: //g'
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
active=s.aria2.tellActive("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(active)
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
# All torrents
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-*/satnogs-observations-*_archive.torrent')))
|
||||||
|
|
||||||
|
# Added torrents
|
||||||
|
# dt-10
|
||||||
|
torrents=sorted(list(path.glob('**/satnogs-observations-000000001-000010000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0001?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0002?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0003?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0004?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0005?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0006?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0007?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0008?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
#torrents=sorted(list(path.glob('**/satnogs-observations-0009?0001-000??0000/satnogs-observations-*_archive.torrent')))
|
||||||
|
|
||||||
|
for i in torrents:
|
||||||
|
print(i.name)
|
||||||
|
s.aria2.addTorrent("token:yajnuAdCemNathNojdi",
|
||||||
|
xmlrpclib.Binary(open(i, mode='rb').read()))
|
||||||
|
time.sleep(10)
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
mkdir -p ~/log /srv/dl
|
||||||
|
|
||||||
|
ulimit -n 8192
|
||||||
|
|
||||||
|
aria2c \
|
||||||
|
--daemon=true \
|
||||||
|
--enable-rpc=true \
|
||||||
|
--dir=/srv/dl \
|
||||||
|
--rpc-listen-port=4800 \
|
||||||
|
--rpc-listen-all=false \
|
||||||
|
--rpc-secret=`cat /home/jebba/.aria-secret` \
|
||||||
|
--disable-ipv6=true \
|
||||||
|
--disk-cache=128M \
|
||||||
|
--file-allocation=falloc \
|
||||||
|
--log-level=notice \
|
||||||
|
--log=/home/jebba/log/aria.log \
|
||||||
|
--bt-max-open-files=1000 \
|
||||||
|
--bt-max-peers=1000 \
|
||||||
|
--continue=true \
|
||||||
|
--follow-torrent=mem \
|
||||||
|
--rpc-save-upload-metadata=false \
|
||||||
|
--max-concurrent-downloads=100 \
|
||||||
|
--bt-max-open-files=50000 \
|
||||||
|
--bt-max-peers=0 \
|
||||||
|
--allow-overwrite=true \
|
||||||
|
--max-download-result=0 \
|
||||||
|
--enable-mmap=true
|
||||||
|
|
||||||
|
exit
|
||||||
|
|
||||||
|
--deferred-input=true \
|
||||||
|
--enable-mmap
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
info=s.aria2.getSessionInfo("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(info)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
methods=s.system.listMethods()
|
||||||
|
|
||||||
|
pprint((sorted)(methods))
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
shutdown=s.aria2.shutdown("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(shutdown)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
stat=s.aria2.getGlobalStat("token:yajnuAdCemNathNojdi")
|
||||||
|
|
||||||
|
pprint(stat)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
stopped=s.aria2.tellStopped("token:yajnuAdCemNathNojdi", 0, 9999)
|
||||||
|
|
||||||
|
pprint(stopped)
|
||||||
|
|
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import time
|
||||||
|
import xmlrpc.client as xmlrpclib
|
||||||
|
from pathlib import Path
|
||||||
|
from pprint import pprint
|
||||||
|
|
||||||
|
s = xmlrpclib.ServerProxy('http://localhost:4800/rpc')
|
||||||
|
path=Path('/srv/dl')
|
||||||
|
|
||||||
|
waiting=s.aria2.tellWaiting("token:yajnuAdCemNathNojdi", 0, 9999)
|
||||||
|
|
||||||
|
pprint(waiting)
|
||||||
|
|
|
@ -29,7 +29,7 @@
|
||||||
# XXX Should check input is sane...
|
# XXX Should check input is sane...
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
OBSIDMIN="$1"
|
OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
|
@ -12,12 +12,12 @@
|
||||||
OBSID="$1"
|
OBSID="$1"
|
||||||
|
|
||||||
# Download observation
|
# Download observation
|
||||||
./wut-water $OBSID
|
wut-water $OBSID
|
||||||
|
|
||||||
# Get previous rating
|
# Get previous rating
|
||||||
VET=`cat download/$OBSID/$OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
VET=`cat download/$OBSID/$OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
echo "Vetted Status: $VET"
|
echo "Vetted Status: $VET"
|
||||||
|
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
./wut $OBSID
|
wut $OBSID
|
||||||
|
|
|
@ -25,7 +25,7 @@ do
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
|
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VET=`./wut $OBSID | cut -f 2 -d " "`
|
WUT_VET=`wut $OBSID | cut -f 2 -d " "`
|
||||||
echo -n "Wut: $WUT_VET "
|
echo -n "Wut: $WUT_VET "
|
||||||
if [ $VET = $WUT_VET ] ; then
|
if [ $VET = $WUT_VET ] ; then
|
||||||
let CORRECT=$CORRECT+1
|
let CORRECT=$CORRECT+1
|
|
@ -27,7 +27,7 @@ do
|
||||||
echo -n "$OBSID "
|
echo -n "$OBSID "
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID`
|
WUT_VETS=`wut $OBSID`
|
||||||
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
||||||
echo -n "$WUT_VET, "
|
echo -n "$WUT_VET, "
|
|
@ -32,7 +32,7 @@ do
|
||||||
echo -n "$OBSID "
|
echo -n "$OBSID "
|
||||||
echo -n "Vet: $VET "
|
echo -n "Vet: $VET "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID | cut -f 2 -d " "`
|
WUT_VETS=`wut $OBSID | cut -f 2 -d " "`
|
||||||
WUT_VET=`echo $WUT_VETS | tail -1 | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | tail -1 | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | head -1`
|
WUT_RATE=`echo $WUT_VETS | head -1`
|
||||||
echo -n "Wut: $WUT_VET "
|
echo -n "Wut: $WUT_VET "
|
|
@ -36,7 +36,7 @@ do
|
||||||
echo -n "$OBSID, "
|
echo -n "$OBSID, "
|
||||||
echo -n "$VET, "
|
echo -n "$VET, "
|
||||||
# Get Machine Learning Result
|
# Get Machine Learning Result
|
||||||
WUT_VETS=`./wut $OBSID`
|
WUT_VETS=`wut $OBSID`
|
||||||
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
WUT_VET=`echo $WUT_VETS | cut -f 2 -d " "`
|
||||||
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
WUT_RATE=`echo $WUT_VETS | cut -f 1 -d " "`
|
||||||
echo -n "$WUT_VET, "
|
echo -n "$WUT_VET, "
|
|
@ -22,9 +22,11 @@ OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing data/ subdirectories"
|
|
@ -20,6 +20,8 @@
|
||||||
#
|
#
|
||||||
# Possible vetted_status: bad, failed, good, null, unknown.
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
OBSTX="$1"
|
OBSTX="$1"
|
||||||
OBSIDMIN="$2"
|
OBSIDMIN="$2"
|
||||||
OBSIDMAX="$3"
|
OBSIDMAX="$3"
|
||||||
|
@ -27,7 +29,7 @@ OBSID=$OBSIDMIN
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing data/ subdirectories"
|
||||||
|
@ -44,7 +46,7 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
do cd $OBSID
|
do cd $OBSID
|
||||||
VET=`cat $OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
VET=`cat $OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
TX=`cat $OBSID.json | jq --compact-output '.[0] | {transmitter_uuid}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
TX=`cat $OBSID.json | jq --compact-output '.[0] | {transmitter_uuid}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
if [ $OBSTX = $TX ] ; then
|
if [ "$OBSTX" = "$TX" ] ; then
|
||||||
RAND_DIR=`echo $((0 + RANDOM % 2))`
|
RAND_DIR=`echo $((0 + RANDOM % 2))`
|
||||||
if [ $RAND_DIR = 1 ] ; then
|
if [ $RAND_DIR = 1 ] ; then
|
||||||
CLASS_DIR="train"
|
CLASS_DIR="train"
|
|
@ -1,20 +1,29 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# wut-dl-sort-txmode
|
# wut-dl-sort-txmode
|
||||||
#
|
#
|
||||||
|
# XXX This script removes directories in data/ !!! XXX
|
||||||
|
#
|
||||||
# Populates the data/ directory from the download/dir.
|
# Populates the data/ directory from the download/dir.
|
||||||
# Does it just for a specific transmitter mode (encoding)
|
# Does it just for a specific transmitter mode (encoding)
|
||||||
# Available encodings:
|
|
||||||
# AFSK AFSK1k2 AHRPT APT BPSK BPSK1k2 BPSK9k6 BPSK12k5 BPSK400 CERTO CW DUV
|
|
||||||
# FFSK1k2 FM FSK1k2 FSK4k8 FSK9k6 FSK19k2 GFSK1k2 GFSK2k4 GFSK4k8 GFSK9k6
|
|
||||||
# GFSK19k2 GFSK Rktr GMSK GMSK1k2 GMSK2k4 GMSK4k8 GMSK9k6 GMSK19k2 HRPT LRPT
|
|
||||||
# MSK1k2 MSK2k4 MSK4k8 PSK PSK31 SSTV USB WSJT
|
|
||||||
#
|
#
|
||||||
# XXX This script removes directories in data/ !!! XXX
|
# Available encodings:
|
||||||
|
# 4FSK AFSK_TUBiX10 AFSK AHRPT AM APT ASK BPSK_PMT-A3 BPSK CERTO CW DBPSK DOKA
|
||||||
|
# DPSK DQPSK DSTAR DUV DVB-S2 FFSK FMN FM FSK_AX.25_G3RUH FSK_AX.100_Mode_5
|
||||||
|
# FSK_AX.100_Mode_6 FSK GFSK_Rktr GFSK GFSK/BPSK GMSK_USP GMSK HRPT LRPT LSB
|
||||||
|
# LoRa MFSK MSK_AX.100_Mode_5 MSK_AX.100_Mode_6 MSK OFDM OQPSK PSK31 PSK63 PSK
|
||||||
|
# QPSK31 QPSK63 QPSK SSTV USB WSJT
|
||||||
|
#
|
||||||
|
# Encoding list generator:
|
||||||
|
# for i in `curl --silent https://db.satnogs.org/api/modes/ | jq '.[] | .name' | sort -V | sed -e 's/"//g' -e 's/ /_/g' -e 's/\//_/g'` ; do echo -n "$i " ; done ; echo
|
||||||
#
|
#
|
||||||
# Usage:
|
# Usage:
|
||||||
# wut-dl-sort-txmode [Encoding] [Minimum Observation ID] [Maximum Observation ID]
|
# wut-dl-sort-txmode [Encoding] [Minimum Observation ID] [Maximum Observation ID]
|
||||||
# Example:
|
# Example:
|
||||||
# wut-dl-sort-txmode CW 1467000 1470000
|
# wut-dl-sort-txmode CW 1467000 1470000
|
||||||
|
# For December, 2019 Example:
|
||||||
|
# wut-dl-sort-txmode CW 1292461 1470525
|
||||||
|
# For July, 2022 Example:
|
||||||
|
# wut-dl-sort-txmode BPSK1k2 6154228 6283338
|
||||||
#
|
#
|
||||||
# * Takes the files in the download/ dir.
|
# * Takes the files in the download/ dir.
|
||||||
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
||||||
|
@ -23,25 +32,31 @@
|
||||||
#
|
#
|
||||||
# Possible vetted_status: bad, failed, good, null, unknown.
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
|
||||||
OBSENC="$1"
|
OBSENC="$1"
|
||||||
OBSIDMIN="$2"
|
OBSIDMIN="$2"
|
||||||
OBSIDMAX="$3"
|
OBSIDMAX="$3"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
||||||
|
DATADIR="/srv/satnogs/data/txmodes/$OBSENC"
|
||||||
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
|
mkdir -p $DATADIR
|
||||||
|
cd $DATADIR || exit
|
||||||
|
|
||||||
# Enable the following if you want to download waterfalls in this range:
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
#echo "Downloading Waterfalls"
|
#echo "Downloading Waterfalls"
|
||||||
#./wut-water-range $OBSIDMIN $OBSIDMAX
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
# XXX remove data/train and data/val directories XXX
|
# XXX remove data/train and data/val directories XXX
|
||||||
echo "Removing data/ subdirectories"
|
echo "Removing subdirectories"
|
||||||
rm -rf data/train data/val
|
rm -rf train/ val/
|
||||||
# Create new empty dirs
|
# Create new empty dirs
|
||||||
mkdir -p data/train/good data/train/bad data/train/failed
|
mkdir -p train/good/ train/bad/ train/failed/
|
||||||
mkdir -p data/val/good data/val/bad data/val/failed
|
mkdir -p val/good/ val/bad/ val/failed/
|
||||||
|
|
||||||
# Then parse each file and link appropriately
|
# Then parse each file and link appropriately
|
||||||
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
||||||
cd download/ || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
||||||
while [ $OBSID -lt $OBSIDMAX ]
|
while [ $OBSID -lt $OBSIDMAX ]
|
||||||
do cd $OBSID
|
do cd $OBSID
|
||||||
|
@ -55,11 +70,11 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
CLASS_DIR="val"
|
CLASS_DIR="val"
|
||||||
fi
|
fi
|
||||||
case "$VET" in
|
case "$VET" in
|
||||||
bad) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
bad) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
good) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
good) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
failed) ln waterfall_$OBSID_*.png ../../data/$CLASS_DIR/$VET/
|
failed) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
;;
|
;;
|
||||||
null) echo "null, not copying"
|
null) echo "null, not copying"
|
||||||
;;
|
;;
|
||||||
|
@ -70,3 +85,4 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
let OBSID=$OBSID+1
|
let OBSID=$OBSID+1
|
||||||
cd ..
|
cd ..
|
||||||
done
|
done
|
||||||
|
|
|
@ -0,0 +1,89 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-dl-sort-txmode-all
|
||||||
|
#
|
||||||
|
# XXX This script removes directories in data/ !!! XXX
|
||||||
|
#
|
||||||
|
# Training of all waterfalls. Used for modes that have few samples.
|
||||||
|
#
|
||||||
|
# Populates the data/ directory from the download/dir.
|
||||||
|
# Does it just for a specific transmitter mode (encoding)
|
||||||
|
#
|
||||||
|
# Available encodings:
|
||||||
|
# 4FSK AFSK_TUBiX10 AFSK AHRPT AM APT ASK BPSK_PMT-A3 BPSK CERTO CW DBPSK DOKA
|
||||||
|
# DPSK DQPSK DSTAR DUV DVB-S2 FFSK FMN FM FSK_AX.25_G3RUH FSK_AX.100_Mode_5
|
||||||
|
# FSK_AX.100_Mode_6 FSK GFSK_Rktr GFSK GFSK/BPSK GMSK_USP GMSK HRPT LRPT LSB
|
||||||
|
# LoRa MFSK MSK_AX.100_Mode_5 MSK_AX.100_Mode_6 MSK OFDM OQPSK PSK31 PSK63 PSK
|
||||||
|
# QPSK31 QPSK63 QPSK SSTV USB WSJT
|
||||||
|
#
|
||||||
|
# Encoding list generator:
|
||||||
|
# for i in `curl --silent https://db.satnogs.org/api/modes/ | jq '.[] | .name' | sort -V | sed -e 's/"//g' -e 's/ /_/g' -e 's/\//_/g'` ; do echo -n "$i " ; done ; echo
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-dl-sort-txmode-all [Minimum Observation ID] [Maximum Observation ID]
|
||||||
|
# Example:
|
||||||
|
# wut-dl-sort-txmode-all 1467000 1470000
|
||||||
|
# For December, 2019 Example:
|
||||||
|
# wut-dl-sort-txmode-all 1292461 1470525
|
||||||
|
# wut-dl-sort-txmode-all 1292434 1470525
|
||||||
|
#
|
||||||
|
# * Takes the files in the download/ dir.
|
||||||
|
# * Looks at the JSON files to see if it is :good", "bad", or "failed".
|
||||||
|
# * Hard link it in the appropriate data/ directory.
|
||||||
|
# * File is randomly copied to either data/train or data/val directory.
|
||||||
|
#
|
||||||
|
# Possible vetted_status: bad, failed, good, null, unknown.
|
||||||
|
|
||||||
|
OBSENC="ALL"
|
||||||
|
OBSIDMIN="$1"
|
||||||
|
OBSIDMAX="$2"
|
||||||
|
OBSID=$OBSIDMIN
|
||||||
|
DATADIR="/srv/satnogs/data/txmodes/$OBSENC"
|
||||||
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
|
mkdir -p $DATADIR
|
||||||
|
cd $DATADIR || exit
|
||||||
|
|
||||||
|
# Enable the following if you want to download waterfalls in this range:
|
||||||
|
#echo "Downloading Waterfalls"
|
||||||
|
#wut-water-range $OBSIDMIN $OBSIDMAX
|
||||||
|
|
||||||
|
# XXX remove data/train and data/val directories XXX
|
||||||
|
echo "Removing subdirectories"
|
||||||
|
rm -rf train/ val/
|
||||||
|
# Create new empty dirs
|
||||||
|
mkdir -p train/good/ train/bad/ train/failed/
|
||||||
|
mkdir -p val/good/ val/bad/ val/failed/
|
||||||
|
|
||||||
|
# Then parse each file and link appropriately
|
||||||
|
echo "Parsing download/ directory for observation IDs $OBSIDMIN to $OBSIDMAX"
|
||||||
|
cd $DOWNDIR || exit
|
||||||
|
|
||||||
|
while [ $OBSID -lt $OBSIDMAX ]
|
||||||
|
do cd $OBSID
|
||||||
|
VET=`cat $OBSID.json | jq --compact-output '.[0] | {vetted_status}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
|
ENC=`cat $OBSID.json | jq --compact-output '.[0] | {transmitter_mode}' | cut -f 2 -d ":" | sed -e 's/}//g' -e 's/"//g'`
|
||||||
|
# Do all of them
|
||||||
|
if [ "$OBSENC" = "$OBSENC" ] ; then
|
||||||
|
RAND_DIR=`echo $((0 + RANDOM % 2))`
|
||||||
|
if [ $RAND_DIR = 1 ] ; then
|
||||||
|
CLASS_DIR="train"
|
||||||
|
else
|
||||||
|
CLASS_DIR="val"
|
||||||
|
fi
|
||||||
|
case "$VET" in
|
||||||
|
bad) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
good) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
failed) ln waterfall_$OBSID_*.png $DATADIR/$CLASS_DIR/$VET/
|
||||||
|
;;
|
||||||
|
null) echo "null, not copying"
|
||||||
|
;;
|
||||||
|
unknown) echo "unknown, not copying"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
let OBSID=$OBSID+1
|
||||||
|
cd ..
|
||||||
|
done
|
||||||
|
|
|
@ -8,6 +8,8 @@
|
||||||
# Example:
|
# Example:
|
||||||
# wut-files
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
echo
|
echo
|
||||||
DF=`df -h download/`
|
DF=`df -h download/`
|
||||||
echo "$DF"
|
echo "$DF"
|
|
@ -0,0 +1,38 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-files
|
||||||
|
#
|
||||||
|
# Tells you about what files you have in downloads/ and data/
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-files
|
||||||
|
# Example:
|
||||||
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs
|
||||||
|
|
||||||
|
TRAIN=`find data/train -type f | wc -l`
|
||||||
|
echo
|
||||||
|
echo "Training Files: $TRAIN"
|
||||||
|
VAL=`find data/val -type f | wc -l`
|
||||||
|
echo "Validation Files: $VAL"
|
||||||
|
TEST=`find data/test -type f | wc -l`
|
||||||
|
echo "Testing Files: $TEST"
|
||||||
|
echo
|
||||||
|
TRAINGOOD=`find data/train/good/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Good: $TRAINGOOD"
|
||||||
|
TRAINBAD=`find data/train/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Bad: $TRAINBAD"
|
||||||
|
TRAINFAILED=`find data/train/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Failed: $TRAINFAILED"
|
||||||
|
echo
|
||||||
|
VALGOOD=`find data/val/good/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Good: $VALGOOD"
|
||||||
|
VALBAD=`find data/val/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Bad: $VALBAD"
|
||||||
|
VALFAILED=`find data/val/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Failed: $VALFAILED"
|
||||||
|
echo
|
||||||
|
TESTWATER=`find data/test/ -name '*.png' | wc -l`
|
||||||
|
echo "Testing waterfalls: $TESTWATER"
|
||||||
|
echo
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-files
|
||||||
|
#
|
||||||
|
# Tells you about what files you have in downloads/ and data/
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-files
|
||||||
|
# Example:
|
||||||
|
# wut-files
|
||||||
|
|
||||||
|
cd /srv/satnogs/data/txmodes/ALL
|
||||||
|
|
||||||
|
ALL=`find . -type f | wc -l`
|
||||||
|
echo
|
||||||
|
echo "All Files: $ALL"
|
||||||
|
TRAIN=`find train -type f | wc -l`
|
||||||
|
echo
|
||||||
|
echo "Training Files: $TRAIN"
|
||||||
|
VAL=`find val -type f | wc -l`
|
||||||
|
echo "Validation Files: $VAL"
|
||||||
|
TRAINGOOD=`find train/good/ -name '*.png' | wc -l`
|
||||||
|
echo
|
||||||
|
echo "Training Good: $TRAINGOOD"
|
||||||
|
TRAINBAD=`find train/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Bad: $TRAINBAD"
|
||||||
|
TRAINFAILED=`find train/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Training Failed: $TRAINFAILED"
|
||||||
|
echo
|
||||||
|
VALGOOD=`find val/good/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Good: $VALGOOD"
|
||||||
|
VALBAD=`find val/bad/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Bad: $VALBAD"
|
||||||
|
VALFAILED=`find val/failed/ -name '*.png' | wc -l`
|
||||||
|
echo "Validation Failed: $VALFAILED"
|
||||||
|
echo
|
||||||
|
|
|
@ -0,0 +1,74 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-ia-sha1 --- Verify downloaded files checksums
|
||||||
|
#
|
||||||
|
# XXX uses both ET and xml.parsers.expat
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
from xml.parsers.expat import ParserCreate, ExpatError, errors
|
||||||
|
from pathlib import Path
|
||||||
|
import hashlib
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
dl_dir=Path('/srv/dl')
|
||||||
|
|
||||||
|
def convertxml(xmlfile, xml_attribs=True):
|
||||||
|
with open(xmlfile, "rb") as f:
|
||||||
|
d = xmltodict.parse(f, xml_attribs=xml_attribs, process_namespaces=False)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='sha1 check Internet Archive downloads')
|
||||||
|
parser.add_argument('observations',
|
||||||
|
type=str,
|
||||||
|
help='Observation set. Example: 006050001-006060000')
|
||||||
|
args = parser.parse_args()
|
||||||
|
obs_set = 'satnogs-observations-' + args.observations
|
||||||
|
obs_dir = Path(dl_dir, obs_set)
|
||||||
|
filename_xml = obs_set + '_files.xml'
|
||||||
|
print('filename XML:', filename_xml)
|
||||||
|
xmlfile = Path(obs_dir, filename_xml)
|
||||||
|
p = ParserCreate()
|
||||||
|
try:
|
||||||
|
p.ParseFile(open(xmlfile, 'rb'))
|
||||||
|
except:
|
||||||
|
print('No XML file to process')
|
||||||
|
exit()
|
||||||
|
|
||||||
|
return(xmlfile, obs_dir)
|
||||||
|
|
||||||
|
def get_sha1(filename):
|
||||||
|
sha1 = hashlib.sha1()
|
||||||
|
try:
|
||||||
|
with open(filename, 'rb') as f:
|
||||||
|
while True:
|
||||||
|
data = f.read(1048576)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
sha1.update(data)
|
||||||
|
return sha1.hexdigest()
|
||||||
|
|
||||||
|
except:
|
||||||
|
status='EXCEPTION'
|
||||||
|
|
||||||
|
def process_set(xmlfile, obs_dir):
|
||||||
|
root_node = ET.parse(xmlfile).getroot()
|
||||||
|
for tag in root_node.findall('file'):
|
||||||
|
name = tag.get('name')
|
||||||
|
for file_sha1 in tag.iter('sha1'):
|
||||||
|
filename = Path(obs_dir, name)
|
||||||
|
sha1_hash=get_sha1(filename)
|
||||||
|
if sha1_hash == file_sha1.text:
|
||||||
|
print('OK ', end='')
|
||||||
|
else:
|
||||||
|
print('FAIL ', end='')
|
||||||
|
print(name)
|
||||||
|
|
||||||
|
def main():
|
||||||
|
xmlfile, obs_dir = parse_args()
|
||||||
|
process_set(xmlfile, obs_dir)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main();
|
||||||
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-ia-torrents --- Download SatNOGS torrents from the Internet Archive.
|
||||||
|
#
|
||||||
|
# https://archive.org/details/satnogs
|
||||||
|
|
||||||
|
from internetarchive import get_item
|
||||||
|
from internetarchive import get_session
|
||||||
|
from internetarchive import download
|
||||||
|
from internetarchive import search_items
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Download dir
|
||||||
|
obs_dl='/srv/dl'
|
||||||
|
|
||||||
|
s = get_session()
|
||||||
|
s.mount_http_adapter()
|
||||||
|
search_results = s.search_items('satnogs-observations')
|
||||||
|
for i in search_items('identifier:satnogs-observations-*'):
|
||||||
|
obs_id=(i['identifier'])
|
||||||
|
print('Collection', obs_id)
|
||||||
|
download(obs_id, verbose=True, glob_pattern='*.torrent',
|
||||||
|
checksum=True, destdir=obs_dl,
|
||||||
|
retries=4, ignore_errors=True)
|
||||||
|
|
||||||
|
download(obs_id, verbose=True, glob_pattern='*_files.xml',
|
||||||
|
checksum=True, destdir=obs_dl,
|
||||||
|
retries=4, ignore_errors=True)
|
||||||
|
|
||||||
|
time.sleep(3)
|
||||||
|
|
|
@ -0,0 +1,37 @@
|
||||||
|
#!/usr/bin/python3
|
||||||
|
#
|
||||||
|
# wut-img-ck.py
|
||||||
|
#
|
||||||
|
# Validate images.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
# All of download...
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/download/*/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
# Individual training dirs
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/train/good/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/train/bad/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/val/good/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
||||||
|
for waterfall in glob.glob('/srv/satnogs/data/val/bad/waterfall*.png'):
|
||||||
|
print(waterfall)
|
||||||
|
v_image = Image.open(waterfall)
|
||||||
|
v_image.verify()
|
||||||
|
|
|
@ -16,25 +16,25 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
# XXX
|
# XXX
|
||||||
from tensorflow.python.keras.models import Model
|
from tensorflow.keras.models import Model
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
#from tensorflow.python.keras.optimizers import Adam
|
#from tensorflow.keras.optimizers import Adam
|
||||||
|
|
||||||
|
|
||||||
# XXX Plot
|
# XXX Plot
|
||||||
from tensorflow.python.keras.utils import plot_model
|
from tensorflow.keras.utils import plot_model
|
||||||
from tensorflow.python.keras.callbacks import ModelCheckpoint
|
from tensorflow.keras.callbacks import ModelCheckpoint
|
||||||
## for visualizing
|
## for visualizing
|
||||||
import matplotlib.pyplot as plt, numpy as np
|
import matplotlib.pyplot as plt, numpy as np
|
||||||
from sklearn.decomposition import PCA
|
from sklearn.decomposition import PCA
|
||||||
|
@ -68,9 +68,9 @@ datagen = ImageDataGenerator(
|
||||||
dtype='float32')
|
dtype='float32')
|
||||||
|
|
||||||
print("datagen.flow")
|
print("datagen.flow")
|
||||||
train_it = datagen.flow_from_directory('data/train/', class_mode='binary')
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
val_it = datagen.flow_from_directory('data/val/', class_mode='binary')
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
test_it = datagen.flow_from_directory('data/test/', class_mode='binary')
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
|
|
||||||
print("train_it.next()")
|
print("train_it.next()")
|
||||||
trainX, trainY = train_it.next()
|
trainX, trainY = train_it.next()
|
|
@ -0,0 +1,278 @@
|
||||||
|
#!/usr/bin/python3
|
||||||
|
# wut-ml
|
||||||
|
#
|
||||||
|
# Vet a SatNOGS image using machine learning (guessing).
|
||||||
|
# It will vet the image located at test/unvetted/waterfall.png.
|
||||||
|
#
|
||||||
|
# Note, there is an issue to fix where it will vet everything
|
||||||
|
# under the data/test directory, so fix that. For now, just delete
|
||||||
|
# everything else. :)
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-ml
|
||||||
|
# Example:
|
||||||
|
# wut-ml
|
||||||
|
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
import tensorflow.python.keras
|
||||||
|
from tensorflow.keras import Sequential
|
||||||
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
|
from tensorflow.keras import optimizers
|
||||||
|
from tensorflow.keras.preprocessing import image
|
||||||
|
from tensorflow.keras.models import load_model
|
||||||
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
|
# XXX
|
||||||
|
from tensorflow.keras.models import Model
|
||||||
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
|
#from tensorflow.keras.optimizers import Adam
|
||||||
|
|
||||||
|
|
||||||
|
# XXX Plot
|
||||||
|
from tensorflow.keras.utils import plot_model
|
||||||
|
from tensorflow.keras.callbacks import ModelCheckpoint
|
||||||
|
## for visualizing
|
||||||
|
import matplotlib.pyplot as plt, numpy as np
|
||||||
|
from sklearn.decomposition import PCA
|
||||||
|
|
||||||
|
# https://keras.io/preprocessing/image/
|
||||||
|
# TODO:
|
||||||
|
# * Pre-process image
|
||||||
|
print("datagen")
|
||||||
|
datagen = ImageDataGenerator(
|
||||||
|
featurewise_center=False,
|
||||||
|
samplewise_center=False,
|
||||||
|
featurewise_std_normalization=False,
|
||||||
|
samplewise_std_normalization=False,
|
||||||
|
zca_whitening=False,
|
||||||
|
zca_epsilon=1e-06,
|
||||||
|
rescale=1./255,
|
||||||
|
shear_range=0.0,
|
||||||
|
zoom_range=0.0,
|
||||||
|
rotation_range=0,
|
||||||
|
width_shift_range=0.0,
|
||||||
|
height_shift_range=0.0,
|
||||||
|
brightness_range=None,
|
||||||
|
channel_shift_range=0.0,
|
||||||
|
fill_mode='nearest',
|
||||||
|
cval=0.0,
|
||||||
|
horizontal_flip=False,
|
||||||
|
vertical_flip=False,
|
||||||
|
preprocessing_function=None,
|
||||||
|
data_format='channels_last',
|
||||||
|
validation_split=0.0,
|
||||||
|
dtype='float32')
|
||||||
|
|
||||||
|
print("datagen.flow")
|
||||||
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
print("train_it.next()")
|
||||||
|
#batchX, batchy = train_it.next()
|
||||||
|
#print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
|
||||||
|
trainX, trainY = train_it.next()
|
||||||
|
print('Batch shape=%s, min=%.3f, max=%.3f' % (trainX.shape, trainX.min(), trainX.max()))
|
||||||
|
valX, valY = val_it.next()
|
||||||
|
print('Batch shape=%s, min=%.3f, max=%.3f' % (valX.shape, valX.min(), valX.max()))
|
||||||
|
testX, testY = test_it.next()
|
||||||
|
print('Batch shape=%s, min=%.3f, max=%.3f' % (testX.shape, testX.min(), testX.max()))
|
||||||
|
|
||||||
|
print("input shape")
|
||||||
|
input_shape=trainX.shape[1:]
|
||||||
|
print(input_shape)
|
||||||
|
|
||||||
|
|
||||||
|
print("autoencoder")
|
||||||
|
# this is the size of our encoded representations
|
||||||
|
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
|
||||||
|
# this is our input placeholder
|
||||||
|
#input_img = Input(shape=(784,))
|
||||||
|
input_img = Input(shape=(196608,))
|
||||||
|
# "encoded" is the encoded representation of the input
|
||||||
|
encoded = Dense(encoding_dim, activation='relu')(input_img)
|
||||||
|
# "decoded" is the lossy reconstruction of the input
|
||||||
|
decoded = Dense(196608, activation='sigmoid')(encoded)
|
||||||
|
#decoded = Dense(784, activation='sigmoid')(encoded)
|
||||||
|
# this model maps an input to its reconstruction
|
||||||
|
autoencoder = Model(input_img, decoded)
|
||||||
|
# this model maps an input to its encoded representation
|
||||||
|
encoder = Model(input_img, encoded)
|
||||||
|
# create a placeholder for an encoded (32-dimensional) input
|
||||||
|
encoded_input = Input(shape=(encoding_dim,))
|
||||||
|
# retrieve the last layer of the autoencoder model
|
||||||
|
decoder_layer = autoencoder.layers[-1]
|
||||||
|
# create the decoder model
|
||||||
|
decoder = Model(encoded_input, decoder_layer(encoded_input))
|
||||||
|
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
|
||||||
|
trainX = trainX.astype('float32') / 255.
|
||||||
|
valX = valX.astype('float32') / 255.
|
||||||
|
trainX = trainX.reshape((len(trainX), np.prod(trainX.shape[1:])))
|
||||||
|
valX = valX.reshape((len(valX), np.prod(valX.shape[1:])))
|
||||||
|
print("trainX.shape")
|
||||||
|
print(trainX.shape)
|
||||||
|
print("valX.shape")
|
||||||
|
print(valX.shape)
|
||||||
|
#batch_size=256,
|
||||||
|
autoencoder.fit(trainX, trainX,
|
||||||
|
epochs=50,
|
||||||
|
shuffle=True,
|
||||||
|
validation_data=(valX, valX))
|
||||||
|
encoded_imgs = encoder.predict(trainX)
|
||||||
|
decoded_imgs = decoder.predict(encoded_imgs)
|
||||||
|
# use Matplotlib (don't ask)
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
|
||||||
|
n = 10 # how many digits we will display
|
||||||
|
plt.figure(figsize=(20, 4))
|
||||||
|
for i in range(n):
|
||||||
|
# display original
|
||||||
|
ax = plt.subplot(2, n, i + 1)
|
||||||
|
plt.imshow(testX[i].reshape(28, 28))
|
||||||
|
plt.gray()
|
||||||
|
ax.get_xaxis().set_visible(False)
|
||||||
|
ax.get_yaxis().set_visible(False)
|
||||||
|
# display reconstruction
|
||||||
|
ax = plt.subplot(2, n, i + 1 + n)
|
||||||
|
plt.imshow(decoded_imgs[i].reshape(28, 28))
|
||||||
|
plt.gray()
|
||||||
|
ax.get_xaxis().set_visible(False)
|
||||||
|
ax.get_yaxis().set_visible(False)
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#img_width=823
|
||||||
|
#img_height=1606
|
||||||
|
img_width=256
|
||||||
|
img_height=256
|
||||||
|
print("Height", img_height, "Width", img_width)
|
||||||
|
|
||||||
|
# https://keras.io/models/sequential/
|
||||||
|
# https://keras.io/getting-started/sequential-model-guide/
|
||||||
|
print("Sequential")
|
||||||
|
model = Sequential()
|
||||||
|
|
||||||
|
print("add")
|
||||||
|
# Other data to consider adding:
|
||||||
|
# * JSON metadata
|
||||||
|
# * TLE
|
||||||
|
# * Audio File (ogg)
|
||||||
|
# * Decoded Data (HEX, ASCII, PNG)
|
||||||
|
# Data from external sources to consider adding:
|
||||||
|
# * Weather
|
||||||
|
|
||||||
|
print("convolution 2 deeeee")
|
||||||
|
# https://keras.io/layers/convolutional/
|
||||||
|
#model.add(Convolution2D(32, 3, 3, input_shape=trainX.shape[1:]))
|
||||||
|
model.add(Convolution2D(32, 3, 3, input_shape=(255,255,3)))
|
||||||
|
# https://keras.io/activations/
|
||||||
|
print("Activation relu")
|
||||||
|
model.add(Activation('relu'))
|
||||||
|
# https://keras.io/layers/pooling/
|
||||||
|
print("Pooling")
|
||||||
|
model.add(MaxPooling2D(pool_size=(2, 2)))
|
||||||
|
print("Convolution2D")
|
||||||
|
model.add(Convolution2D(32, 3, 3))
|
||||||
|
print("Activation relu")
|
||||||
|
model.add(Activation('relu'))
|
||||||
|
print("Pooling")
|
||||||
|
model.add(MaxPooling2D(pool_size=(2, 2)))
|
||||||
|
print("Convolution2D")
|
||||||
|
model.add(Convolution2D(64, 3, 3))
|
||||||
|
print("Activation relu")
|
||||||
|
model.add(Activation('relu'))
|
||||||
|
print("Pooling")
|
||||||
|
model.add(MaxPooling2D(pool_size=(2, 2)))
|
||||||
|
# https://keras.io/layers/core/
|
||||||
|
print("Flatten")
|
||||||
|
model.add(Flatten())
|
||||||
|
# https://keras.io/layers/core/
|
||||||
|
print("Dense")
|
||||||
|
model.add(Dense(64))
|
||||||
|
print("Activation relu")
|
||||||
|
model.add(Activation('relu'))
|
||||||
|
# https://keras.io/layers/core/
|
||||||
|
print("Dropout")
|
||||||
|
model.add(Dropout(0.1))
|
||||||
|
print("Dense")
|
||||||
|
model.add(Dense(1))
|
||||||
|
print("Activation softmax")
|
||||||
|
model.add(Activation('softmax'))
|
||||||
|
|
||||||
|
# https://keras.io/models/sequential/
|
||||||
|
print("compile")
|
||||||
|
model.compile(
|
||||||
|
loss='categorical_crossentropy',
|
||||||
|
loss_weights=None,
|
||||||
|
sample_weight_mode=None,
|
||||||
|
weighted_metrics=None,
|
||||||
|
target_tensors=None,
|
||||||
|
optimizer='rmsprop',
|
||||||
|
metrics=['accuracy'])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# https://keras.io/models/sequential/
|
||||||
|
print("fit")
|
||||||
|
model.fit(
|
||||||
|
x=train_it,
|
||||||
|
y=None,
|
||||||
|
batch_size=None,
|
||||||
|
epochs=1,
|
||||||
|
verbose=2,
|
||||||
|
callbacks=None,
|
||||||
|
validation_split=0.0,
|
||||||
|
validation_data=val_it,
|
||||||
|
shuffle=True,
|
||||||
|
class_weight=None,
|
||||||
|
sample_weight=None,
|
||||||
|
initial_epoch=0,
|
||||||
|
steps_per_epoch=None,
|
||||||
|
validation_steps=None,
|
||||||
|
validation_freq=1,
|
||||||
|
max_queue_size=10,
|
||||||
|
workers=16,
|
||||||
|
use_multiprocessing=True)
|
||||||
|
|
||||||
|
# https://keras.io/models/sequential/
|
||||||
|
# evaluate(x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
|
||||||
|
|
||||||
|
# TODO:
|
||||||
|
# * Generate output to visualize training/validating/testing.
|
||||||
|
# Plot, fail
|
||||||
|
#print("plot")
|
||||||
|
#plot_model(test_it, to_file='data/wut-plot.png', show_shapes=True, show_layer_names=True)
|
||||||
|
|
||||||
|
# https://keras.io/models/sequential/
|
||||||
|
print("predict")
|
||||||
|
prediction = model.predict(
|
||||||
|
x=test_it,
|
||||||
|
batch_size=None,
|
||||||
|
verbose=2,
|
||||||
|
steps=None,
|
||||||
|
callbacks=None,
|
||||||
|
max_queue_size=10,
|
||||||
|
workers=16,
|
||||||
|
use_multiprocessing=True)
|
||||||
|
|
||||||
|
print(prediction)
|
||||||
|
|
||||||
|
if prediction[0][0] == 1:
|
||||||
|
rating = 'bad'
|
||||||
|
else:
|
||||||
|
rating = 'good'
|
||||||
|
print('Observation: %s' % (rating))
|
||||||
|
|
|
@ -20,17 +20,17 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
model = load_model('data/wut.h5')
|
model = load_model('/srv/satnogs/data/wut.h5')
|
||||||
img_width=256
|
img_width=256
|
||||||
img_height=256
|
img_height=256
|
||||||
model = Sequential()
|
model = Sequential()
|
|
@ -19,20 +19,20 @@
|
||||||
import os
|
import os
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import tensorflow.python.keras
|
import tensorflow.python.keras
|
||||||
from tensorflow.python.keras import Sequential
|
from tensorflow.keras import Sequential
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
from tensorflow.python.keras import optimizers
|
from tensorflow.keras import optimizers
|
||||||
from tensorflow.python.keras.preprocessing import image
|
from tensorflow.keras.preprocessing import image
|
||||||
from tensorflow.python.keras.models import load_model
|
from tensorflow.keras.models import load_model
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
|
||||||
datagen = ImageDataGenerator()
|
datagen = ImageDataGenerator()
|
||||||
train_it = datagen.flow_from_directory('data/train/', class_mode='binary')
|
train_it = datagen.flow_from_directory('/srv/satnogs/data/train/', class_mode='binary')
|
||||||
val_it = datagen.flow_from_directory('data/val/', class_mode='binary')
|
val_it = datagen.flow_from_directory('/srv/satnogs/data/val/', class_mode='binary')
|
||||||
test_it = datagen.flow_from_directory('data/test/', class_mode='binary')
|
test_it = datagen.flow_from_directory('/srv/satnogs/data/test/', class_mode='binary')
|
||||||
batchX, batchy = train_it.next()
|
batchX, batchy = train_it.next()
|
||||||
print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
|
print('Batch shape=%s, min=%.3f, max=%.3f' % (batchX.shape, batchX.min(), batchX.max()))
|
||||||
img_width=256
|
img_width=256
|
|
@ -7,7 +7,7 @@
|
||||||
# Download Observation: JSON. Not waterfall, audio, or data files.
|
# Download Observation: JSON. Not waterfall, audio, or data files.
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
cd $DOWNDIR || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# wut-review-staging
|
# wut-review-staging
|
||||||
# Go through all the images in data/staging and review them.
|
# Go through all the images in data/staging and review them.
|
||||||
cd data/staging || exit
|
cd /srv/satnogs/data/staging || exit
|
||||||
for i in *.png
|
for i in *.png
|
||||||
do echo $i
|
do echo $i
|
||||||
rm ../test/unvetted/*.png
|
rm ../test/unvetted/*.png
|
|
@ -22,7 +22,7 @@ KEEP=100
|
||||||
# this is so bad no one should ever run it again
|
# this is so bad no one should ever run it again
|
||||||
#exit 0
|
#exit 0
|
||||||
# XXX Delete data in this directory! XXX
|
# XXX Delete data in this directory! XXX
|
||||||
cd data/test/unvetted/ || exit
|
cd /srv/satnogs/data/test/unvetted/ || exit
|
||||||
|
|
||||||
TOTALFILES=`ls waterfall_*.png | wc -l`
|
TOTALFILES=`ls waterfall_*.png | wc -l`
|
||||||
for wf in waterfall_*.png
|
for wf in waterfall_*.png
|
|
@ -0,0 +1,25 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-tf
|
||||||
|
#
|
||||||
|
# Starts worker client.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-tf
|
||||||
|
# Example:
|
||||||
|
# wut-tf
|
||||||
|
#
|
||||||
|
# Note:
|
||||||
|
# Each node needs a unique index number.
|
||||||
|
#
|
||||||
|
# NOTE!
|
||||||
|
# This generates the node number based off the hostname.
|
||||||
|
# The hosts are ml0 through ml5.
|
||||||
|
|
||||||
|
HOSTNUM=`hostname | sed -e 's/ml//g'`
|
||||||
|
|
||||||
|
#export TF_CONFIG='{"cluster": {"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
export TF_CONFIG='{"cluster": {"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}}'
|
||||||
|
|
||||||
|
echo $TF_CONFIG
|
||||||
|
python3 wut-tf.py
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-tf.py
|
||||||
|
#
|
||||||
|
# https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
|
#
|
||||||
|
# Distributed Learning
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||||
|
from __future__ import print_function
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import numpy as np
|
||||||
|
import datetime
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.python.keras
|
||||||
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
|
from tensorflow.keras import optimizers
|
||||||
|
from tensorflow.keras import Sequential
|
||||||
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
|
from tensorflow.keras.models import load_model
|
||||||
|
from tensorflow.keras.models import Model
|
||||||
|
from tensorflow.keras.preprocessing import image
|
||||||
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
|
os.environ["TF_CONFIG"] = json.dumps({
|
||||||
|
"cluster": {
|
||||||
|
"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
||||||
|
}#,
|
||||||
|
#"task": {"type": "worker", "index": 0 },
|
||||||
|
})
|
||||||
|
print("Tensorflow Version: ", tf.__version__)
|
||||||
|
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
|
||||||
|
print("Num CPUs Available: ", len(tf.config.experimental.list_physical_devices('CPU')))
|
||||||
|
print(tf.config.experimental.list_physical_devices())
|
||||||
|
#with tf.device("GPU:0"):
|
||||||
|
# tf.ones(()) # Make sure we can run on GPU
|
||||||
|
print("XLA_FLAGS='{}'".format(os.getenv("XLA_FLAGS")))
|
||||||
|
print(os.getenv("XLA_FLAGS"))
|
||||||
|
tf.keras.backend.clear_session()
|
||||||
|
IMG_HEIGHT = 416
|
||||||
|
IMG_WIDTH= 804
|
||||||
|
batch_size = 32
|
||||||
|
epochs = 4
|
||||||
|
BUFFER_SIZE = 10000
|
||||||
|
NUM_WORKERS = 6
|
||||||
|
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
||||||
|
#strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
|
||||||
|
#strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
||||||
|
# tf.distribute.experimental.CollectiveCommunication.RING)
|
||||||
|
AUTOTUNE = tf.data.experimental.AUTOTUNE
|
||||||
|
NUM_TOTAL_IMAGES=100
|
||||||
|
tf.config.optimizer.set_jit(True)
|
||||||
|
#tf.summary.trace_on(profiler=True)
|
||||||
|
#tf.summary.trace_export(name=trace-export,profiler_outdir=logs)
|
||||||
|
options = tf.data.Options()
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
# Download Observation: JSON and waterfall. Not audio or data files.
|
# Download Observation: JSON and waterfall. Not audio or data files.
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
|
|
||||||
cd $DOWNDIR || exit
|
cd $DOWNDIR || exit
|
||||||
|
|
|
@ -9,9 +9,25 @@
|
||||||
#
|
#
|
||||||
# The last observation to start in 2019 was 1470525
|
# The last observation to start in 2019 was 1470525
|
||||||
# The last observation to start in 2019-11 was 1292461
|
# The last observation to start in 2019-11 was 1292461
|
||||||
|
#
|
||||||
|
# APPROXIMATE:
|
||||||
|
# Observations 2015: 1-86
|
||||||
|
# Observations 2016: 87-613. Many in 15,000 range too
|
||||||
|
# Observations 2017: 614-55551
|
||||||
|
# Observations 2018: 55551-388962
|
||||||
|
# Observations 2019: 388963-1470939
|
||||||
|
# Observations 2020: 1470940-3394851
|
||||||
|
# Observations 2021: 3394852-5231193
|
||||||
|
# Observations 2022-01 2022-04: 5231194-5712616
|
||||||
|
# Observations 2022-05 5712617-6021303
|
||||||
|
# Observations 2022-06 6021304-6154227
|
||||||
|
# Observations 2022-07 6154228-6283338
|
||||||
|
#
|
||||||
# NOTE! Observations are not in numerical order by chronology.
|
# NOTE! Observations are not in numerical order by chronology.
|
||||||
# It looks like it is ordered by scheduling, so an older observation can have
|
# It looks like it is ordered by scheduling, so an older observation can have
|
||||||
# a higher observation ID.
|
# a higher observation ID. So the above list is rough, not exact.
|
||||||
|
# Also, there are exceptions, such as observations with IDs far higher than
|
||||||
|
# others that year.
|
||||||
#
|
#
|
||||||
# So to get mostly all of the observations in December, 2019, run:
|
# So to get mostly all of the observations in December, 2019, run:
|
||||||
# wut-water-range 1292461 1470525
|
# wut-water-range 1292461 1470525
|
||||||
|
@ -21,7 +37,7 @@
|
||||||
# XXX Should check input is sane...
|
# XXX Should check input is sane...
|
||||||
|
|
||||||
APIURL="https://network.satnogs.org/api"
|
APIURL="https://network.satnogs.org/api"
|
||||||
DOWNDIR="download"
|
DOWNDIR="/srv/satnogs/download"
|
||||||
OBSIDMIN="$1"
|
OBSIDMIN="$1"
|
||||||
OBSIDMAX="$2"
|
OBSIDMAX="$2"
|
||||||
OBSID=$OBSIDMIN
|
OBSID=$OBSIDMIN
|
||||||
|
@ -38,7 +54,7 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
--http2 --ipv4 \
|
--http2 --ipv4 \
|
||||||
--silent \
|
--silent \
|
||||||
--output $OBSID.json \
|
--output $OBSID.json \
|
||||||
"$APIURL/observations/?id=$OBSID&ground_station=&satellite__norad_cat_id=&transmitter_uuid=&transmitter_mode=&transmitter_type=&vetted_status=&vetted_user=&start=&end=" && sleep `echo $((0 + RANDOM % 2))`
|
"$APIURL/observations/?id=$OBSID&ground_station=&satellite__norad_cat_id=&transmitter_uuid=&transmitter_mode=&transmitter_type=&vetted_status=&vetted_user=&start=&end=" && sleep `echo $((0 + RANDOM % 1))`
|
||||||
WATERURL=`cat $OBSID.json | jq --compact-output '.[0] | {waterfall}' | cut -f 2- -d : | sed -e 's/}//g' -e 's/http:/https:/g' -e 's/"//g'`
|
WATERURL=`cat $OBSID.json | jq --compact-output '.[0] | {waterfall}' | cut -f 2- -d : | sed -e 's/}//g' -e 's/http:/https:/g' -e 's/"//g'`
|
||||||
WATERFILE=`basename $WATERURL`
|
WATERFILE=`basename $WATERURL`
|
||||||
[ ! -f "$WATERFILE" ] && \
|
[ ! -f "$WATERFILE" ] && \
|
||||||
|
@ -48,7 +64,7 @@ while [ $OBSID -lt $OBSIDMAX ]
|
||||||
--continue-at - \
|
--continue-at - \
|
||||||
--remote-time \
|
--remote-time \
|
||||||
--output $WATERFILE \
|
--output $WATERFILE \
|
||||||
$WATERURL && sleep `echo $((0 + RANDOM % 3))`
|
$WATERURL && sleep `echo $((0 + RANDOM % 1))`
|
||||||
cd ..
|
cd ..
|
||||||
let OBSID=$OBSID+1
|
let OBSID=$OBSID+1
|
||||||
done
|
done
|
|
@ -0,0 +1,26 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-worker
|
||||||
|
#
|
||||||
|
# Starts worker client.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-worker
|
||||||
|
# Example:
|
||||||
|
# wut-worker
|
||||||
|
#
|
||||||
|
# Note:
|
||||||
|
# Each node needs a unique index number.
|
||||||
|
#
|
||||||
|
# NOTE!
|
||||||
|
# This generates the node number based off the hostname.
|
||||||
|
# The hosts are rs-ml1 through rs-ml10. The index starts at zero,
|
||||||
|
# so the index is hostname minus one (without alpha).
|
||||||
|
|
||||||
|
HOSTNUM=`hostname | sed -e 's/rs-ml//g'`
|
||||||
|
let HOSTNUM=$HOSTNUM-1
|
||||||
|
|
||||||
|
export TF_CONFIG='{"cluster": {"worker": [ "rs-ml1:23009", "rs-ml2:23009", "rs-ml3:23009", "rs-ml4:23009", "rs-ml5:23009", "rs-ml6:23009", "rs-ml7:23009", "rs-ml8:23009", "rs-ml9:23009", "rs-ml10:23009"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
|
||||||
|
echo $TF_CONFIG
|
||||||
|
wut-worker.py
|
||||||
|
|
|
@ -0,0 +1,27 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# wut-worker-mas
|
||||||
|
#
|
||||||
|
# Starts worker client.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# wut-worker-mas
|
||||||
|
# Example:
|
||||||
|
# wut-worker-mas
|
||||||
|
#
|
||||||
|
# Note:
|
||||||
|
# Each node needs a unique index number.
|
||||||
|
#
|
||||||
|
# NOTE!
|
||||||
|
# This generates the node number based off the hostname.
|
||||||
|
# The hosts are rs-ml0 through rs-ml10.
|
||||||
|
|
||||||
|
HOSTNUM=`hostname | sed -e 's/rs-ml//g'`
|
||||||
|
|
||||||
|
#export TF_CONFIG='{"cluster": {"worker": [ "ml0-int:2222", "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
#export TF_CONFIG='{"cluster": {"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}}'
|
||||||
|
export TF_CONFIG='{"cluster": {"worker": [ "rs-ml1:23009", "rs-ml2:23009", "rs-ml3:23009", "rs-ml4:23009", "rs-ml5:23009", "rs-ml6:23009", "rs-ml7:23009", "rs-ml8:23009", "rs-ml9:23009", "rs-ml10:23009"]}}'
|
||||||
|
#export TF_CONFIG='{"cluster": {"chief": [ "ml0-int:2222" ], "worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'
|
||||||
|
|
||||||
|
echo $TF_CONFIG
|
||||||
|
wut-worker-mas.py
|
||||||
|
|
|
@ -0,0 +1,222 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-worker-mas.py
|
||||||
|
#
|
||||||
|
# https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
|
#
|
||||||
|
# Distributed Learning
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||||
|
from __future__ import print_function
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import numpy as np
|
||||||
|
import datetime
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.python.keras
|
||||||
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
|
from tensorflow.keras import optimizers
|
||||||
|
from tensorflow.keras import Sequential
|
||||||
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
|
from tensorflow.keras.models import load_model
|
||||||
|
from tensorflow.keras.models import Model
|
||||||
|
from tensorflow.keras.preprocessing import image
|
||||||
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
|
|
||||||
|
tf.keras.backend.clear_session()
|
||||||
|
tf.config.optimizer.set_jit(True)
|
||||||
|
options = tf.data.Options()
|
||||||
|
os.environ["TF_CONFIG"] = json.dumps({
|
||||||
|
"cluster": {
|
||||||
|
"chief": [ "ml0-int:2222" ],
|
||||||
|
"worker": [ "ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
||||||
|
},
|
||||||
|
"task": {"type": "chief", "index": 0 },
|
||||||
|
})
|
||||||
|
|
||||||
|
print("Tensorflow Version: ", tf.__version__)
|
||||||
|
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
|
||||||
|
print("Num CPUs Available: ", len(tf.config.experimental.list_physical_devices('CPU')))
|
||||||
|
print("XLA_FLAGS='{}'".format(os.getenv("XLA_FLAGS")))
|
||||||
|
|
||||||
|
IMG_HEIGHT = 416
|
||||||
|
IMG_WIDTH= 804
|
||||||
|
batch_size = 32
|
||||||
|
epochs = 4
|
||||||
|
BUFFER_SIZE = 10000
|
||||||
|
NUM_WORKERS = 6
|
||||||
|
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
||||||
|
|
||||||
|
# XXX
|
||||||
|
POSITIVE_DIRECTORY = '/srv/satnogs/data/pos'
|
||||||
|
pos_dir = '/srv/satnogs/data/posdir'
|
||||||
|
|
||||||
|
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
||||||
|
tf.distribute.experimental.CollectiveCommunication.RING)
|
||||||
|
|
||||||
|
def get_bytes_and_label(filepath):
|
||||||
|
raw_bytes = tf.io.read_file(filepath)
|
||||||
|
label = tf.strings.regex_full_match(
|
||||||
|
POSITIVE_DIRECTORY, pos_dir + ".+")
|
||||||
|
return raw_bytes, label
|
||||||
|
|
||||||
|
def uncompiled_model():
|
||||||
|
model = Sequential([
|
||||||
|
Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Conv2D(32, 3, padding='same', activation='relu'),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Conv2D(64, 3, padding='same', activation='relu'),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Flatten(),
|
||||||
|
Dense(512, activation='relu'),
|
||||||
|
Dense(1, activation='sigmoid')
|
||||||
|
])
|
||||||
|
return model
|
||||||
|
|
||||||
|
input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)
|
||||||
|
def process_image(image_bytes, label):
|
||||||
|
image = tf.io.decode_png(image_bytes)
|
||||||
|
#image = tf.image.resize(image, resolution)
|
||||||
|
image.set_shape(input_shape)
|
||||||
|
#image = image / 255. - 0.5
|
||||||
|
#image = tf.image.random_flip_left_right(image)
|
||||||
|
#image = tf.image.random_flip_up_down(image)
|
||||||
|
#image += tf.random.normal(
|
||||||
|
# image.shape, mean=0, steddev=0.1)
|
||||||
|
return image, tf.cast(label, tf.float32)
|
||||||
|
|
||||||
|
AUTOTUNE = tf.data.experimental.AUTOTUNE
|
||||||
|
NUM_TOTAL_IMAGES=100
|
||||||
|
data_root = "/srv/satnogs/data"
|
||||||
|
profile_dir = os.path.join(data_root, "profiles")
|
||||||
|
dataset = tf.data.Dataset.list_files(data_root)
|
||||||
|
dataset = dataset.shuffle(NUM_TOTAL_IMAGES)
|
||||||
|
dataset = dataset.map(get_bytes_and_label, num_parallel_calls=AUTOTUNE)
|
||||||
|
dataset = dataset.map(process_image, num_parallel_calls=AUTOTUNE)
|
||||||
|
dataset = dataset.batch(batch_size=32)
|
||||||
|
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
|
||||||
|
|
||||||
|
os.makedirs(profile_dir, exist_ok=True)
|
||||||
|
|
||||||
|
# tf.data.Dataset.from_generator
|
||||||
|
#tf.summary.trace_on(profiler=True)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def compiled_model():
|
||||||
|
model = uncompiled_model()
|
||||||
|
model.compile(optimizer='adam',
|
||||||
|
loss='binary_crossentropy',
|
||||||
|
metrics=['accuracy'])
|
||||||
|
return model
|
||||||
|
|
||||||
|
with strategy.scope():
|
||||||
|
#model = tf.keras.applications.mobilenet_v2.MobileNetV2(...)
|
||||||
|
#optimizer = tf.keras.optimzers.SGD(learning_rate=0.01)
|
||||||
|
#loss_fn = tf.nn.sigmoid_cross_entropy_with_logits
|
||||||
|
#model.compile(..., optimizer=optimizer)
|
||||||
|
model = uncompiled_model()
|
||||||
|
model = compiled_model()
|
||||||
|
#model.fit(train_dataset, epochs=10)
|
||||||
|
model.fit(
|
||||||
|
train_data_gen,
|
||||||
|
steps_per_epoch=total_train // batch_size,
|
||||||
|
epochs=epochs,
|
||||||
|
validation_data=val_data_gen,
|
||||||
|
validation_steps=total_val // batch_size,
|
||||||
|
verbose=2
|
||||||
|
)
|
||||||
|
|
||||||
|
#tf.summary.trace_export(name=trace-export,profiler_outdir=logs)
|
||||||
|
|
||||||
|
with strategy.scope():
|
||||||
|
#model, loss_fn, optimzer = ...
|
||||||
|
@tf.function
|
||||||
|
def replicated_step(features, labels):
|
||||||
|
return strategy.experimental_run_v2(step, (features, labels))
|
||||||
|
with tf.GradientTape() as tape:
|
||||||
|
logits = model(features, training=True)
|
||||||
|
loss = tf.nn.compute_average_loss(
|
||||||
|
loss, global_batch_size=global_batch_size)
|
||||||
|
|
||||||
|
grads = tape.gradient(loss, model.trainable_variables)
|
||||||
|
optimizer.apply_gradients(zip(grads, model.trainable_variables))
|
||||||
|
return loss
|
||||||
|
|
||||||
|
data = strategy.experimental_distribute_dataset(data)
|
||||||
|
|
||||||
|
for features, labels in data:
|
||||||
|
loss = replicated_step(features, labels)
|
||||||
|
|
||||||
|
def data_generator():
|
||||||
|
batch = []
|
||||||
|
shuffle(data)
|
||||||
|
for image_path, label in data:
|
||||||
|
# Load from disk
|
||||||
|
image = imread(image_path)
|
||||||
|
# Resize
|
||||||
|
# image = resize(image, resolution)
|
||||||
|
# Horizontal and vertical flip
|
||||||
|
#image = random_flip(image)
|
||||||
|
# Normalize and add Gaussian noise
|
||||||
|
#image = normalize_and_add_noise(image)
|
||||||
|
batch.append((image, label))
|
||||||
|
handle_batching
|
||||||
|
|
||||||
|
# XXX ?
|
||||||
|
def handle_batching():
|
||||||
|
if len(batch) == batch_size:
|
||||||
|
yield concat(batch)
|
||||||
|
batch.reset()
|
||||||
|
|
||||||
|
train_dir = os.path.join('/srv/satnogs/data/', 'train')
|
||||||
|
val_dir = os.path.join('/srv/satnogs/data/', 'val')
|
||||||
|
train_good_dir = os.path.join(train_dir, 'good')
|
||||||
|
train_bad_dir = os.path.join(train_dir, 'bad')
|
||||||
|
val_good_dir = os.path.join(val_dir, 'good')
|
||||||
|
val_bad_dir = os.path.join(val_dir, 'bad')
|
||||||
|
num_train_good = len(os.listdir(train_good_dir))
|
||||||
|
num_train_bad = len(os.listdir(train_bad_dir))
|
||||||
|
num_val_good = len(os.listdir(val_good_dir))
|
||||||
|
num_val_bad = len(os.listdir(val_bad_dir))
|
||||||
|
total_train = num_train_good + num_train_bad
|
||||||
|
total_val = num_val_good + num_val_bad
|
||||||
|
|
||||||
|
print('total training good images:', num_train_good)
|
||||||
|
print('total training bad images:', num_train_bad)
|
||||||
|
print("--")
|
||||||
|
print("Total training images:", total_train)
|
||||||
|
print('total validation good images:', num_val_good)
|
||||||
|
print('total validation bad images:', num_val_bad)
|
||||||
|
print("--")
|
||||||
|
print("Total validation images:", total_val)
|
||||||
|
print("--")
|
||||||
|
print("Reduce training and validation set when testing")
|
||||||
|
#total_train = 16
|
||||||
|
#total_val = 16
|
||||||
|
print("Reduced training images:", total_train)
|
||||||
|
print("Reduced validation images:", total_val)
|
||||||
|
|
||||||
|
|
||||||
|
#train_image_generator = ImageDataGenerator(
|
||||||
|
# rescale=1./255
|
||||||
|
#)
|
||||||
|
#val_image_generator = ImageDataGenerator(
|
||||||
|
# rescale=1./255
|
||||||
|
#)
|
||||||
|
|
||||||
|
#train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
|
||||||
|
# directory=train_dir,
|
||||||
|
# shuffle=True,
|
||||||
|
# target_size=(IMG_HEIGHT, IMG_WIDTH),
|
||||||
|
# class_mode='binary')
|
||||||
|
#val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,
|
||||||
|
# directory=val_dir,
|
||||||
|
# target_size=(IMG_HEIGHT, IMG_WIDTH),
|
||||||
|
# class_mode='binary')
|
||||||
|
|
|
@ -0,0 +1,255 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# wut-worker.py
|
||||||
|
#
|
||||||
|
# wut --- What U Think? SatNOGS Observation AI, training application cluster edition.
|
||||||
|
#
|
||||||
|
# https://spacecruft.org/spacecruft/satnogs-wut
|
||||||
|
#
|
||||||
|
# Based on data/train and data/val directories builds a wut.tf file.
|
||||||
|
# GPLv3+
|
||||||
|
# Built using Jupyter, Tensorflow, Keras
|
||||||
|
|
||||||
|
from __future__ import absolute_import, division, print_function, unicode_literals
|
||||||
|
from __future__ import print_function
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
import simplejson as json
|
||||||
|
import datetime
|
||||||
|
import tensorflow as tf
|
||||||
|
import tensorflow.python.keras
|
||||||
|
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
||||||
|
from tensorflow.keras import optimizers
|
||||||
|
from tensorflow.keras import Sequential
|
||||||
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
|
||||||
|
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
||||||
|
from tensorflow.keras.layers import Input, concatenate
|
||||||
|
from tensorflow.keras.models import load_model
|
||||||
|
from tensorflow.keras.models import Model
|
||||||
|
from tensorflow.keras.preprocessing import image
|
||||||
|
from tensorflow.keras.preprocessing.image import img_to_array
|
||||||
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
||||||
|
from tensorflow.keras.preprocessing.image import load_img
|
||||||
|
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
|
||||||
|
get_ipython().run_line_magic('matplotlib', 'inline')
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import seaborn as sns
|
||||||
|
#from sklearn.decomposition import PCA
|
||||||
|
#from ipywidgets import interact, interactive, fixed, interact_manual
|
||||||
|
#import ipywidgets as widgets
|
||||||
|
#from IPython.display import display, Image
|
||||||
|
|
||||||
|
print('tf {}'.format(tf.__version__))
|
||||||
|
|
||||||
|
os.environ["TF_CONFIG"] = json.dumps({
|
||||||
|
"cluster": {
|
||||||
|
"worker": ["ml1-int:2222", "ml2-int:2222", "ml3-int:2222", "ml4-int:2222", "ml5-int:2222" ]
|
||||||
|
},
|
||||||
|
"task": {"type": "worker", "index": 0 },
|
||||||
|
"num_workers": 5
|
||||||
|
})
|
||||||
|
|
||||||
|
IMG_HEIGHT = 416
|
||||||
|
IMG_WIDTH= 804
|
||||||
|
batch_size = 32
|
||||||
|
epochs = 1
|
||||||
|
# Full size, machine barfs probably needs more RAM
|
||||||
|
#IMG_HEIGHT = 832
|
||||||
|
#IMG_WIDTH = 1606
|
||||||
|
# Good results
|
||||||
|
#batch_size = 128
|
||||||
|
#epochs = 6
|
||||||
|
|
||||||
|
tf.keras.backend.clear_session()
|
||||||
|
|
||||||
|
options = tf.data.Options()
|
||||||
|
#options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
|
||||||
|
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.DATA
|
||||||
|
# XXX
|
||||||
|
#dataset = dataset.with_options(options)
|
||||||
|
|
||||||
|
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
|
||||||
|
tf.distribute.experimental.CollectiveCommunication.RING)
|
||||||
|
#mirrored_strategy = tf.distribute.MirroredStrategy(
|
||||||
|
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
|
||||||
|
|
||||||
|
root_data_dir = ('/srv/satnogs')
|
||||||
|
train_dir = os.path.join(root_data_dir, 'data/', 'train')
|
||||||
|
val_dir = os.path.join(root_data_dir,'data/', 'val')
|
||||||
|
train_good_dir = os.path.join(train_dir, 'good')
|
||||||
|
train_bad_dir = os.path.join(train_dir, 'bad')
|
||||||
|
val_good_dir = os.path.join(val_dir, 'good')
|
||||||
|
val_bad_dir = os.path.join(val_dir, 'bad')
|
||||||
|
num_train_good = len(os.listdir(train_good_dir))
|
||||||
|
num_train_bad = len(os.listdir(train_bad_dir))
|
||||||
|
num_val_good = len(os.listdir(val_good_dir))
|
||||||
|
num_val_bad = len(os.listdir(val_bad_dir))
|
||||||
|
total_train = num_train_good + num_train_bad
|
||||||
|
total_val = num_val_good + num_val_bad
|
||||||
|
|
||||||
|
print('total training good images:', num_train_good)
|
||||||
|
print('total training bad images:', num_train_bad)
|
||||||
|
print("--")
|
||||||
|
print("Total training images:", total_train)
|
||||||
|
print('total validation good images:', num_val_good)
|
||||||
|
print('total validation bad images:', num_val_bad)
|
||||||
|
print("--")
|
||||||
|
print("Total validation images:", total_val)
|
||||||
|
print("--")
|
||||||
|
print("Reduce training and validation set when testing")
|
||||||
|
total_train = 100
|
||||||
|
total_val = 100
|
||||||
|
print("Reduced training images:", total_train)
|
||||||
|
print("Reduced validation images:", total_val)
|
||||||
|
|
||||||
|
train_image_generator = ImageDataGenerator(
|
||||||
|
rescale=1./255
|
||||||
|
)
|
||||||
|
val_image_generator = ImageDataGenerator(
|
||||||
|
rescale=1./255
|
||||||
|
)
|
||||||
|
#train_data_gen = train_image_generator.flow_from_directory(batch_size=GLOBAL_BATCH_SIZE,
|
||||||
|
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
|
||||||
|
directory=train_dir,
|
||||||
|
shuffle=True,
|
||||||
|
target_size=(IMG_HEIGHT, IMG_WIDTH),
|
||||||
|
class_mode='binary')
|
||||||
|
#val_data_gen = val_image_generator.flow_from_directory(batch_size=GLOBAL_BATCH_SIZE,
|
||||||
|
val_data_gen = val_image_generator.flow_from_directory(batch_size=batch_size,
|
||||||
|
directory=val_dir,
|
||||||
|
target_size=(IMG_HEIGHT, IMG_WIDTH),
|
||||||
|
class_mode='binary')
|
||||||
|
#train_dist_dataset = strategy.experimental_distribute_dataset()
|
||||||
|
#val_dist_dataset = strategy.experimental_distribute_dataset()
|
||||||
|
sample_train_images, _ = next(train_data_gen)
|
||||||
|
sample_val_images, _ = next(val_data_gen)
|
||||||
|
# This function will plot images in the form of a grid with 1 row and 3 columns where images are placed in each column.
|
||||||
|
def plotImages(images_arr):
|
||||||
|
fig, axes = plt.subplots(1, 3, figsize=(20,20))
|
||||||
|
axes = axes.flatten()
|
||||||
|
for img, ax in zip( images_arr, axes):
|
||||||
|
ax.imshow(img)
|
||||||
|
ax.axis('off')
|
||||||
|
plt.tight_layout()
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
#plotImages(sample_train_images[0:3])
|
||||||
|
#plotImages(sample_val_images[0:3])
|
||||||
|
#get_ipython().run_line_magic('load_ext', 'tensorboard')
|
||||||
|
#get_ipython().system('rm -rf ./clusterlogs/')
|
||||||
|
#log_dir="clusterlogs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
|
#log_dir="clusterlogs"
|
||||||
|
#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
|
||||||
|
#tensorboard_callback = tensorflow.keras.callbacks.TensorBoard(log_dir=log_dir)
|
||||||
|
#%tensorboard --logdir clusterlogs --port 6006
|
||||||
|
|
||||||
|
strategy.num_replicas_in_sync
|
||||||
|
|
||||||
|
## Compute global batch size using number of replicas.
|
||||||
|
#GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
|
||||||
|
BATCH_SIZE_PER_REPLICA = 8
|
||||||
|
print("BATCH_SIZE_PER_REPLICA", BATCH_SIZE_PER_REPLICA)
|
||||||
|
print("strategy.num_replicas_in_sync", strategy.num_replicas_in_sync)
|
||||||
|
global_batch_size = (BATCH_SIZE_PER_REPLICA *
|
||||||
|
strategy.num_replicas_in_sync)
|
||||||
|
print("global_batch_size", global_batch_size)
|
||||||
|
print("total_train", total_train)
|
||||||
|
print("total_val ", total_val)
|
||||||
|
print("batch_size", batch_size)
|
||||||
|
print("total_train // batch_size", total_train // batch_size)
|
||||||
|
print("total_val // batch_size", total_val // batch_size)
|
||||||
|
#.batch(global_batch_size)
|
||||||
|
#dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)
|
||||||
|
#dataset = dataset.batch(global_batch_size)
|
||||||
|
#LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15}
|
||||||
|
|
||||||
|
#learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
|
||||||
|
|
||||||
|
def get_uncompiled_model():
|
||||||
|
model = Sequential([
|
||||||
|
Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Conv2D(32, 3, padding='same', activation='relu'),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Conv2D(64, 3, padding='same', activation='relu'),
|
||||||
|
MaxPooling2D(),
|
||||||
|
Flatten(),
|
||||||
|
Dense(512, activation='relu'),
|
||||||
|
Dense(1, activation='sigmoid')
|
||||||
|
])
|
||||||
|
return model
|
||||||
|
|
||||||
|
#get_uncompiled_model()
|
||||||
|
def get_compiled_model():
|
||||||
|
model = get_uncompiled_model()
|
||||||
|
model.compile(optimizer='adam',
|
||||||
|
loss='binary_crossentropy',
|
||||||
|
metrics=['accuracy'])
|
||||||
|
return model
|
||||||
|
|
||||||
|
# Create a checkpoint directory to store the checkpoints.
|
||||||
|
#checkpoint_dir = './training_checkpoints'
|
||||||
|
#checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
|
||||||
|
#callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='tmp/keras-ckpt')]
|
||||||
|
#callbacks=[tensorboard_callback,callbacks]
|
||||||
|
|
||||||
|
#def get_fit_model():
|
||||||
|
# model = get_compiled_model()
|
||||||
|
# model.fit(
|
||||||
|
# train_data_gen,
|
||||||
|
# steps_per_epoch=total_train // batch_size,
|
||||||
|
# epochs=epochs,
|
||||||
|
# validation_data=val_data_gen,
|
||||||
|
# validation_steps=total_val // batch_size,
|
||||||
|
# verbose=2
|
||||||
|
# )
|
||||||
|
#return model
|
||||||
|
|
||||||
|
with strategy.scope():
|
||||||
|
model = get_compiled_model()
|
||||||
|
history = model.fit(
|
||||||
|
train_data_gen,
|
||||||
|
steps_per_epoch=total_train // batch_size,
|
||||||
|
epochs=epochs,
|
||||||
|
validation_data=val_data_gen,
|
||||||
|
validation_steps=total_val // batch_size,
|
||||||
|
verbose=2
|
||||||
|
).batch(global_batch_size)
|
||||||
|
|
||||||
|
#model.summary()
|
||||||
|
|
||||||
|
print("TRAINING info")
|
||||||
|
print(train_dir)
|
||||||
|
print(train_good_dir)
|
||||||
|
print(train_bad_dir)
|
||||||
|
print(train_image_generator)
|
||||||
|
print(train_data_gen)
|
||||||
|
#print(sample_train_images)
|
||||||
|
#print(history)
|
||||||
|
#model.to_json()
|
||||||
|
|
||||||
|
#history = model.fit(X, y, batch_size=32, epochs=40, validation_split=0.1)
|
||||||
|
|
||||||
|
model.save('data/models/FOO/wut-train-cluster2.tf')
|
||||||
|
model.save('data/models/FOO/wut-train-cluster2.h5')
|
||||||
|
model.save_weights('data/models/FOO/wut-weights-train-cluster2.tf')
|
||||||
|
model.save_weights('data/models/FOO/wut-weights-train-cluster2.h5')
|
||||||
|
|
||||||
|
acc = history.history['accuracy']
|
||||||
|
val_acc = history.history['val_accuracy']
|
||||||
|
loss = history.history['loss']
|
||||||
|
val_loss = history.history['val_loss']
|
||||||
|
epochs_range = range(epochs)
|
||||||
|
plt.figure(figsize=(8, 8))
|
||||||
|
plt.subplot(1, 2, 1)
|
||||||
|
plt.plot(epochs_range, acc, label='Training Accuracy')
|
||||||
|
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
|
||||||
|
plt.legend(loc='lower right')
|
||||||
|
plt.title('Training and Validation Accuracy')
|
||||||
|
plt.subplot(1, 2, 2)
|
||||||
|
plt.plot(epochs_range, loss, label='Training Loss')
|
||||||
|
plt.plot(epochs_range, val_loss, label='Validation Loss')
|
||||||
|
plt.legend(loc='upper right')
|
||||||
|
plt.title('Training and Validation Loss')
|
||||||
|
plt.show()
|
||||||
|
|
|
@ -1,160 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
# wut-train-cluster-fn.py --- What U Think? SatNOGS Observation AI, training application cluster edition.
|
|
||||||
#
|
|
||||||
# https://spacecruft.org/spacecruft/satnogs-wut
|
|
||||||
#
|
|
||||||
# Based on data/train and data/val directories builds a wut.tf file.
|
|
||||||
# GPLv3+
|
|
||||||
# Built using Jupyter, Tensorflow, Keras
|
|
||||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
||||||
from __future__ import print_function
|
|
||||||
import os
|
|
||||||
import numpy as np
|
|
||||||
import simplejson as json
|
|
||||||
import datetime
|
|
||||||
import tensorflow as tf
|
|
||||||
import tensorflow.python.keras
|
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
|
||||||
from tensorflow.python.keras import optimizers
|
|
||||||
from tensorflow.python.keras import Sequential
|
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
|
||||||
from tensorflow.python.keras.models import load_model
|
|
||||||
from tensorflow.python.keras.models import Model
|
|
||||||
from tensorflow.python.keras.preprocessing import image
|
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
|
||||||
|
|
||||||
# Environmental variables are set by the shell script that launches this python script.
|
|
||||||
#os.environ["TF_CONFIG"] = json.dumps({
|
|
||||||
# "cluster": {
|
|
||||||
# "worker": [ "10.100.100.130:2222", "ml1:2222", "ml2:2222", "ml3:2222", "ml4:2222", "ml5:2222" ]
|
|
||||||
# },
|
|
||||||
# "task": {"type": "worker", "index": 0 },
|
|
||||||
# "num_workers": 5
|
|
||||||
#})
|
|
||||||
# Input image dimensions (waterfall images) and training hyperparameters.
IMG_HEIGHT = 416
IMG_WIDTH= 804
batch_size = 32
epochs = 4

# XXX
#tf.keras.backend.clear_session()

# Multi-worker data-parallel training; uses RING collective communication.
# Cluster membership comes from the TF_CONFIG environment variable set by
# the launcher shell script (see the commented-out template above).
# NOTE(review): tf.distribute.experimental.MultiWorkerMirroredStrategy and
# CollectiveCommunication are deprecated/renamed in newer TF releases —
# confirm the installed TF version still exposes these names.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
    tf.distribute.experimental.CollectiveCommunication.RING)
|
|
||||||
|
|
||||||
# Directory layout: data/{train,val}/{good,bad}, one image file per sample.
train_dir = os.path.join('data/', 'train')
val_dir = os.path.join('data/', 'val')

train_good_dir = os.path.join(train_dir, 'good')
train_bad_dir = os.path.join(train_dir, 'bad')
val_good_dir = os.path.join(val_dir, 'good')
val_bad_dir = os.path.join(val_dir, 'bad')

# Count the files in each class directory in one pass.
num_train_good, num_train_bad, num_val_good, num_val_bad = (
    len(os.listdir(class_dir))
    for class_dir in (train_good_dir, train_bad_dir, val_good_dir, val_bad_dir)
)

total_train = num_train_good + num_train_bad
total_val = num_val_good + num_val_bad
|
|
||||||
|
|
||||||
# Summarise the dataset sizes on stdout (output identical to the plain
# print chain this replaces).
for label, value in (
    ('total training good images:', num_train_good),
    ('total training bad images:', num_train_bad),
    ('--', None),
    ('Total training images:', total_train),
    ('total validation good images:', num_val_good),
    ('total validation bad images:', num_val_bad),
    ('--', None),
    ('Total validation images:', total_val),
    ('--', None),
):
    if value is None:
        print(label)
    else:
        print(label, value)

print("Reduce training and validation set when testing")
# Uncomment to shrink the run while smoke-testing the pipeline:
#total_train = 16
#total_val = 16
print("Reduced training images:", total_train)
print("Reduced validation images:", total_val)
|
|
||||||
|
|
||||||
# Rescale 8-bit pixel values into [0, 1] for both dataset splits.
train_image_generator = ImageDataGenerator(rescale=1. / 255)
val_image_generator = ImageDataGenerator(rescale=1. / 255)

# Training batches are shuffled; validation batches keep directory order
# (flow_from_directory infers the binary labels from the good/bad subdirs).
train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir,
    batch_size=batch_size,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='binary',
)
val_data_gen = val_image_generator.flow_from_directory(
    directory=val_dir,
    batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='binary',
)
|
|
||||||
|
|
||||||
def get_uncompiled_model():
    """Build the CNN classifier: three conv/pool stages plus a dense head.

    Returns an uncompiled ``Sequential`` model mapping an
    (IMG_HEIGHT, IMG_WIDTH, 3) image to a single sigmoid score.
    """
    model = Sequential()
    model.add(Conv2D(16, 3, padding='same', activation='relu',
                     input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
    model.add(MaxPooling2D())
    model.add(Conv2D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(64, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model
|
|
||||||
|
|
||||||
def get_compiled_model():
    """Return the CNN compiled for binary classification (adam + BCE)."""
    model = get_uncompiled_model()
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return model
|
|
||||||
|
|
||||||
#def get_fit_model():
|
|
||||||
# model = get_compiled_model()
|
|
||||||
# model.fit(
|
|
||||||
# train_data_gen,
|
|
||||||
# steps_per_epoch=total_train // batch_size,
|
|
||||||
# epochs=epochs,
|
|
||||||
# validation_data=val_data_gen,
|
|
||||||
# validation_steps=total_val // batch_size,
|
|
||||||
# verbose=2
|
|
||||||
# )
|
|
||||||
# return model
|
|
||||||
|
|
||||||
#with strategy.scope():
|
|
||||||
# get_uncompiled_model()
|
|
||||||
#with strategy.scope():
|
|
||||||
# get_compiled_model()
|
|
||||||
#with strategy.scope():
|
|
||||||
# get_fit_model()
|
|
||||||
|
|
||||||
#multi_worker_model = get_compiled_model()
|
|
||||||
#multi_worker_model.fit(
|
|
||||||
# x=train_data_gen,
|
|
||||||
# epochs=epochs,
|
|
||||||
# steps_per_epoch=total_train // batch_size
|
|
||||||
# )
|
|
||||||
|
|
||||||
# Create and train the model inside the multi-worker strategy scope.
with strategy.scope():
    model = get_compiled_model()
    model.fit(
        x=train_data_gen,
        epochs=epochs,
        steps_per_epoch=total_train // batch_size,
        validation_data=val_data_gen,
        validation_steps=total_val // batch_size,
        verbose=2,
    )

# Dump the training inputs for the worker log.
print("TRAINING info")
for item in (train_dir, train_good_dir, train_bad_dir,
             train_image_generator, train_data_gen):
    print(item)
|
|
||||||
|
|
||||||
# The End
|
|
||||||
|
|
26
wut-worker
26
wut-worker
|
@ -1,26 +0,0 @@
|
||||||
#!/bin/bash
# wut-worker
#
# Starts worker client.
#
# Usage:
# wut-worker
# Example:
# wut-worker
#
# Note:
# Each node needs a unique index number.
#
# NOTE!
# This generates the node number based off the hostname.
# The hosts are ml1 through ml5.  The head node (10.100.100.130) occupies
# index 0 in the worker list, so ml1..ml5 map directly to indexes 1..5.

# Derive this node's worker index by stripping the "ml" prefix from the
# hostname.  $(...) replaces the deprecated backtick command substitution.
HOSTNUM=$(hostname | sed -e 's/ml//g')
#let HOSTNUM=$HOSTNUM-1

export TF_CONFIG='{"cluster": {"worker": [ "10.100.100.130:2222", "ml1:2222", "ml2:2222", "ml3:2222", "ml4:2222", "ml5:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'

# Quote the expansion so the JSON is printed verbatim; unquoted, echo would
# word-split it and collapse its internal whitespace.
echo "$TF_CONFIG"
python3 wut-worker.py
|
|
||||||
|
|
|
@ -1,26 +0,0 @@
|
||||||
#!/bin/bash
# wut-worker-train-cluster-fn
#
# Starts worker client.
#
# Usage:
# wut-worker-train-cluster-fn
# Example:
# wut-worker-train-cluster-fn
#
# Note:
# Each node needs a unique index number.
#
# NOTE!
# This generates the node number based off the hostname.
# The hosts are ml1 through ml5.  The head node (10.100.100.130) occupies
# index 0 in the worker list, so ml1..ml5 map directly to indexes 1..5.

# Derive this node's worker index by stripping the "ml" prefix from the
# hostname.  $(...) replaces the deprecated backtick command substitution.
HOSTNUM=$(hostname | sed -e 's/ml//g')
#let HOSTNUM=$HOSTNUM-1

export TF_CONFIG='{"cluster": {"worker": [ "10.100.100.130:2222", "ml1:2222", "ml2:2222", "ml3:2222", "ml4:2222", "ml5:2222"]}, "task": {"index": '$HOSTNUM', "type": "worker"}}'

# Quote the expansion so the JSON is printed verbatim; unquoted, echo would
# word-split it and collapse its internal whitespace.
echo "$TF_CONFIG"
python3 wut-train-cluster-fn.py
|
|
||||||
|
|
|
@ -1,79 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
#
|
|
||||||
# wut-worker.py --- Runs on worker nodes.
|
|
||||||
#
|
|
||||||
# Start with wut-worker shell script to set correct
|
|
||||||
# environmental variables.
|
|
||||||
|
|
||||||
from __future__ import absolute_import, division, print_function, unicode_literals
|
|
||||||
import simplejson as json
|
|
||||||
import os
|
|
||||||
import numpy as np
|
|
||||||
import tensorflow as tf
|
|
||||||
import tensorflow.python.keras
|
|
||||||
from tensorflow.python.keras import Sequential
|
|
||||||
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
|
|
||||||
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
|
|
||||||
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
|
|
||||||
from tensorflow.python.keras import optimizers
|
|
||||||
from tensorflow.python.keras.preprocessing import image
|
|
||||||
from tensorflow.python.keras.models import load_model
|
|
||||||
from tensorflow.python.keras.preprocessing.image import load_img
|
|
||||||
from tensorflow.python.keras.preprocessing.image import img_to_array
|
|
||||||
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
|
|
||||||
from tensorflow.python.keras.models import Model
|
|
||||||
from tensorflow.python.keras.layers import Input, concatenate
|
|
||||||
|
|
||||||
#batch_size = 32
#epochs = 4
# Input image dimensions; must match the training pipeline's target_size.
IMG_HEIGHT = 416
IMG_WIDTH= 804

# Multi-worker data-parallel strategy; cluster membership and this node's
# index come from the TF_CONFIG environment variable set by the wut-worker
# launcher script.
# NOTE(review): tf.distribute.experimental.MultiWorkerMirroredStrategy is
# deprecated/renamed in newer TF releases — confirm the installed version.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
|
|
||||||
|
|
||||||
|
|
||||||
def get_uncompiled_model():
    """Build the CNN classifier: three conv/pool stages plus a dense head.

    Returns an uncompiled ``Sequential`` model mapping an
    (IMG_HEIGHT, IMG_WIDTH, 3) image to a single sigmoid score.
    """
    model = Sequential()
    model.add(Conv2D(16, 3, padding='same', activation='relu',
                     input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
    model.add(MaxPooling2D())
    model.add(Conv2D(32, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Conv2D(64, 3, padding='same', activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    return model
|
|
||||||
|
|
||||||
def get_compiled_model():
    """Return the CNN compiled for binary classification (adam + BCE)."""
    model = get_uncompiled_model()
    model.compile(
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return model
|
|
||||||
|
|
||||||
def get_fit_model():
    """Build the compiled model, run a fit pass, and return the model.

    NOTE(review): ``model.fit(model)`` passes the model itself as the
    training data, which cannot be right — Keras ``fit`` expects a dataset
    or generator here.  The commented-out variant below this function shows
    the presumably intended call (``train_data_gen`` plus step counts), but
    those objects are not defined in this file; confirm and fix before use.
    """
    model = get_compiled_model()
    model.fit(
        model )
    return model
|
|
||||||
|
|
||||||
#def get_fit_model():
|
|
||||||
# model = get_compiled_model()
|
|
||||||
# model.fit(
|
|
||||||
# train_data_gen,
|
|
||||||
# steps_per_epoch=total_train // batch_size,
|
|
||||||
# epochs=epochs,
|
|
||||||
# validation_data=val_data_gen,
|
|
||||||
# validation_steps=total_val // batch_size,
|
|
||||||
# verbose=2
|
|
||||||
# )
|
|
||||||
# return model
|
|
||||||
|
|
||||||
# Smoke-run each model builder inside the distribution-strategy scope; the
# returned models are intentionally discarded.
with strategy.scope():
    for builder in (get_uncompiled_model, get_compiled_model, get_fit_model):
        builder()
|
|
||||||
|
|
Loading…
Reference in New Issue