# satnogs-wut/src/wut-tf.py
#!/usr/bin/env python3
#
# wut-tf.py
#
# https://spacecruft.org/spacecruft/satnogs-wut
#
# Distributed Learning
from __future__ import absolute_import, division, print_function, unicode_literals
from __future__ import print_function
import os
import json
import numpy as np
import datetime
import tensorflow as tf
import tensorflow.python.keras
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras import optimizers
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.layers import Input, concatenate
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import load_img
# Advertise the multi-worker cluster to tf.distribute via the TF_CONFIG
# environment variable (JSON, read by MultiWorkerMirroredStrategy et al.).
# Per-process "task" entry intentionally left unset here; each worker is
# expected to fill in its own index, e.g.:
#   "task": {"type": "worker", "index": 0}
_cluster_config = {
    "cluster": {
        "worker": [
            "ml1-int:2222",
            "ml2-int:2222",
            "ml3-int:2222",
            "ml4-int:2222",
            "ml5-int:2222",
        ]
    }
}
os.environ["TF_CONFIG"] = json.dumps(_cluster_config)
# Print runtime diagnostics: TensorFlow version, visible accelerators,
# and any XLA flags picked up from the environment.
_list_devices = tf.config.experimental.list_physical_devices
print("Tensorflow Version: ", tf.__version__)
print("Num GPUs Available: ", len(_list_devices('GPU')))
print("Num CPUs Available: ", len(_list_devices('CPU')))
print(_list_devices())
#with tf.device("GPU:0"):
#  tf.ones(()) # Make sure we can run on GPU
_xla_flags = os.getenv("XLA_FLAGS")
print("XLA_FLAGS='{}'".format(_xla_flags))
print(_xla_flags)
# Reset Keras global state (layer name counters, cached graphs) before
# building any model.
tf.keras.backend.clear_session()
# ---- Training hyperparameters and tf.data configuration ----
IMG_HEIGHT = 416   # input image height in pixels
IMG_WIDTH = 804    # input image width in pixels
batch_size = 32    # per-worker batch size
epochs = 4
BUFFER_SIZE = 10000  # shuffle buffer size for tf.data pipelines
# NOTE(review): cluster above lists 5 workers but NUM_WORKERS is 6 — confirm.
NUM_WORKERS = 6
GLOBAL_BATCH_SIZE = 64 * NUM_WORKERS
# Alternative distribution strategies, kept for reference:
#strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
#strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
#    tf.distribute.experimental.CollectiveCommunication.RING)
AUTOTUNE = tf.data.experimental.AUTOTUNE
NUM_TOTAL_IMAGES = 100
# Enable XLA JIT compilation for the whole process.
tf.config.optimizer.set_jit(True)
#tf.summary.trace_on(profiler=True)
#tf.summary.trace_export(name=trace-export,profiler_outdir=logs)
options = tf.data.Options()