Cleanup simulator and add usage instructions (#1050)

* cleanup simulator files

* minor updates

* update readme

* keras runner builds

* hmm, still doesn't work

* keras runner works

* should work with python3 keras mod

* touchups
pull/1065/head
George Hotz 2020-02-04 19:46:57 -08:00 committed by GitHub
parent 6b1506740e
commit c50c718293
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 197 additions and 193 deletions

View File

@ -1,4 +0,0 @@
*.tar.gz
include
lib
*LICENSE*

View File

@ -1,11 +0,0 @@
#!/bin/bash
TF=libtensorflow-gpu-linux-x86_64-1.13.1.tar.gz
#TF=libtensorflow-gpu-linux-x86_64-1.14.0.tar.gz
#TF=libtensorflow-gpu-linux-x86_64-1.15.0.tar.gz
if [ ! -f $TF ]; then
wget https://storage.googleapis.com/tensorflow/libtensorflow/$TF
fi
rm -rf include lib
tar xvf $TF

BIN
models/supercombo.dlc.pb (Stored with Git LFS)

Binary file not shown.

View File

@ -14,13 +14,11 @@ if arch == "aarch64":
else:
libs += ['symphony-cpu', 'pthread']
if FindFile('libtensorflow.so', env['LIBPATH']):
# for tensorflow support
common_src += ['runners/tfmodel.cc']
libs += ['tensorflow']
# tell runners to use it
lenv['CFLAGS'].append("-DUSE_TF_MODEL")
lenv['CXXFLAGS'].append("-DUSE_TF_MODEL")
# for tensorflow support
common_src += ['runners/tfmodel.cc']
# tell runners to use it
lenv['CFLAGS'].append("-DUSE_TF_MODEL")
lenv['CXXFLAGS'].append("-DUSE_TF_MODEL")
common = lenv.Object(common_src)

View File

@ -108,7 +108,7 @@ DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_
delete[] cropped_buf;
delete[] resized_buf;
s->m->execute(net_input_buf);
s->m->execute(net_input_buf, yuv_buf_len);
delete[] net_input_buf;
DMonitoringResult ret = {0};

View File

@ -70,7 +70,7 @@ ModelDataRaw model_eval_frame(ModelState* s, cl_command_queue q,
float *new_frame_buf = frame_prepare(&s->frame, q, yuv_cl, width, height, transform);
memmove(&s->input_frames[0], &s->input_frames[MODEL_FRAME_SIZE], sizeof(float)*MODEL_FRAME_SIZE);
memmove(&s->input_frames[MODEL_FRAME_SIZE], new_frame_buf, sizeof(float)*MODEL_FRAME_SIZE);
s->m->execute(s->input_frames);
s->m->execute(s->input_frames, MODEL_FRAME_SIZE*2);
#ifdef DUMP_YUV
FILE *dump_yuv_file = fopen("/sdcard/dump.yuv", "wb");

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python3
# TODO: why are the keras models saved with python 2?
from __future__ import print_function
import tensorflow as tf
import os
import sys
import tensorflow.keras as keras
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
def read(sz):
  """Read exactly `sz` float32 values from stdin (fd 0).

  Blocks until sz*4 bytes have arrived, accumulating partial reads.
  Asserts (fatal) if the pipe hits EOF early, i.e. the writer died.

  Returns a 1-D np.float32 array of length sz.
  """
  chunks = []
  got = 0
  want = sz * 4  # 4 bytes per float32
  while got < want:
    st = os.read(0, want - got)
    # os.read returns b'' only on EOF; a closed pipe is fatal here
    assert(len(st) > 0)
    chunks.append(st)
    got += len(st)
  # np.fromstring is deprecated (and removed) for binary data;
  # np.frombuffer is the supported equivalent
  return np.frombuffer(b''.join(chunks), dtype=np.float32)
def write(d):
  """Emit the raw bytes of numpy array `d` on stdout (fd 1)."""
  payload = d.tobytes()
  os.write(1, payload)
def run_loop(m):
  """Serve inference requests over stdin/stdout forever.

  Each request is one flattened float32 input vector read from fd 0;
  the model `m` is run on it as a batch of one and the raw float32
  result is written back on fd 1. Exits only when read() hits EOF.
  """
  in_size = m.inputs[0].shape[1]
  out_size = m.outputs[0].shape[1]
  print("ready to run keras model %d -> %d" % (in_size, out_size), file=sys.stderr)
  while True:
    request = read(in_size).reshape((1, in_size))
    response = m.predict_on_batch(request)
    write(response)
if __name__ == "__main__":
  # Load the keras model named on the command line, wrap all of its inputs
  # behind one flat concatenated input tensor, then serve flattened float32
  # requests from stdin forever (see run_loop).
  print(tf.__version__, file=sys.stderr)
  m = load_model(sys.argv[1])
  print(m, file=sys.stderr)
  # flattened element count of each model input (batch dim excluded);
  # np.product is deprecated in favor of np.prod
  bs = [int(np.prod(ii.shape[1:])) for ii in m.inputs]
  # single flat input holding the concatenation of every model input
  ri = keras.layers.Input((sum(bs),))
  tii = []
  acc = 0
  for i, ii in enumerate(m.inputs):
    print(ii, file=sys.stderr)
    # Bind the slice bounds as default arguments: a plain closure over
    # `acc`/`i` late-binds, so every Lambda would see the final loop values
    # if the function were invoked after the loop finishes.
    ti = keras.layers.Lambda(lambda x, a=acc, b=bs[i]: x[:, a:a+b],
                             output_shape=(1, bs[i]))(ri)
    acc += bs[i]
    tr = keras.layers.Reshape(ii.shape[1:])(ti)
    tii.append(tr)
  # run the original model on the carved-up slices; flatten its outputs
  no = keras.layers.Concatenate()(m(tii))
  m = Model(inputs=ri, outputs=[no])
  run_loop(m)

View File

@ -5,7 +5,7 @@ class RunModel {
public:
virtual void addRecurrent(float *state, int state_size) {}
virtual void addDesire(float *state, int state_size) {}
virtual void execute(float *net_input_buf) {}
virtual void execute(float *net_input_buf, int buf_size) {}
};
#endif

View File

@ -117,7 +117,7 @@ std::unique_ptr<zdl::DlSystem::IUserBuffer> SNPEModel::addExtra(float *state, in
return ret;
}
void SNPEModel::execute(float *net_input_buf) {
void SNPEModel::execute(float *net_input_buf, int buf_size) {
assert(inputBuffer->setBufferAddress(net_input_buf));
if (!snpe->execute(inputMap, outputMap)) {
PrintErrorStringAndExit();

View File

@ -25,7 +25,7 @@ public:
}
void addRecurrent(float *state, int state_size);
void addDesire(float *state, int state_size);
void execute(float *net_input_buf);
void execute(float *net_input_buf, int buf_size);
private:
uint8_t *model_data = NULL;

View File

@ -1,160 +1,98 @@
#include "tfmodel.h"
#include <stdio.h>
#include <string>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdexcept>
#include "common/util.h"
#include "common/utilpp.h"
#include "common/swaglog.h"
#include <cassert>
// Throw (rather than abort) if the most recent TensorFlow C API call
// failed, propagating TF's own error message to the caller.
void TFModel::status_check() const {
if (TF_GetCode(this->status) != TF_OK) {
throw std::runtime_error(TF_Message(status));
}
}
// Wrap the existing float buffer `dat` in a TF_Tensor shaped like graph
// endpoint `out`, with the batch dimension forced to 1. The tensor does
// NOT take ownership of `dat` (no-op deallocator below), so the buffer
// must outlive the returned tensor.
TF_Tensor *TFModel::allocate_tensor_for_output(TF_Output out, float *dat) {
int num_dims = TF_GraphGetTensorNumDims(graph, out, status);
status_check();
int64_t *dims = new int64_t[num_dims];
TF_GraphGetTensorShape(graph, out, dims, num_dims, status);
status_check();
// pin the batch dimension to one sample (presumably reported as
// unknown/-1 by the graph — confirm)
dims[0] = 1;
int total = 1;
for (int i = 0; i < num_dims; i++) total *= dims[i];
//printf("dims %d total %d wdat %p\n", num_dims, total, dat);
// don't deallocate the buffers
auto d = [](void* ddata, size_t, void* arg) {};
TF_Tensor *ret = TF_NewTensor(TF_FLOAT, dims, num_dims, (void*)dat, sizeof(float)*total, d, NULL);
//TF_Tensor *ret = TF_AllocateTensor(TF_FLOAT, dims, num_dims, sizeof(float)*total);
//memcpy(TF_TensorData(ret), dat, sizeof(float)*total);
assert(ret);
delete[] dims;
return ret;
}
// Construct the TF runner for model `path` (given without extension);
// execute() fills `_output` (`_output_size` floats) each inference.
// NOTE(review): as rendered, this body appears to contain BOTH the old
// in-process TF C API setup (graph import, session, tensor lookup) AND the
// newer keras-subprocess setup (pipes + fork/exec of keras_runner.py) from
// the same diff — confirm against the repo which half is live. The
// `runtime` parameter is accepted but never read in this body.
TFModel::TFModel(const char *path, float *_output, size_t _output_size, int runtime) {
// load model
{
TF_Buffer* buf;
size_t model_size;
char tmp[1024];
snprintf(tmp, sizeof(tmp), "%s.pb", path);
LOGD("loading model %s", tmp);
uint8_t *model_data = (uint8_t *)read_file(tmp, &model_size);
assert(model_data);
buf = TF_NewBuffer();
// hand the malloc'd file contents to TF; freed by the deallocator below
buf->data = model_data;
buf->length = model_size;
buf->data_deallocator = [](void *data, size_t) { free(data); };
LOGD("loaded model of size %d", model_size);
// import graph
status = TF_NewStatus();
graph = TF_NewGraph();
TF_ImportGraphDefOptions *opts = TF_NewImportGraphDefOptions();
// TODO: fix the GPU, currently it hangs if you set this to /gpu:0
//TF_ImportGraphDefOptionsSetDefaultDevice(opts, "/cpu:0");
TF_GraphImportGraphDef(graph, buf, opts, status);
TF_DeleteImportGraphDefOptions(opts);
TF_DeleteBuffer(buf);
status_check();
LOGD("imported graph");
}
// set up session
TF_SessionOptions* sess_opts = TF_NewSessionOptions();
// don't use all GPU memory
/*uint8_t config[15] = {0x32, 0xb, 0x9, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x1, 0x38, 0x1};
double gpu_memory_fraction = 0.2;
auto bytes = reinterpret_cast<std::uint8_t*>(&gpu_memory_fraction);
for (std::size_t i = 0; i < sizeof(gpu_memory_fraction); ++i) {
config[i + 3] = bytes[i];
}
TF_SetConfig(sess_opts, config, sizeof(config), status);
status_check();*/
// make session
session = TF_NewSession(graph, sess_opts, status);
TF_DeleteSessionOptions(sess_opts);
status_check();
// find tensors
// TODO: make this generic
input_operation = {TF_GraphOperationByName(graph, "lambda/div"), 0};
if (input_operation.oper == NULL) {
input_operation = {TF_GraphOperationByName(graph, "vision_lambda/div"), 0};
}
assert(input_operation.oper != NULL);
output_operation = {TF_GraphOperationByName(graph, "outputs/outputs/Identity"), 0};
if (output_operation.oper == NULL) {
output_operation = {TF_GraphOperationByName(graph, "outputs/concat"), 0};
}
assert(output_operation.oper != NULL);
// output tensor is good to bind now
output = _output;
output_size = _output_size;
// keras-subprocess path: derive "<path minus .dlc>.keras" from the model
// name. NOTE(review): assumes `path` contains ".dlc" — strstr would
// return NULL and deref-crash otherwise; confirm callers guarantee this.
char tmp[1024];
strncpy(tmp, path, sizeof(tmp));
strstr(tmp, ".dlc")[0] = '\0';
strcat(tmp, ".keras");
LOGD("loading model %s", tmp);
// pipein: parent writes model inputs; pipeout: parent reads outputs
assert(pipe(pipein) == 0);
assert(pipe(pipeout) == 0);
std::string exe_dir = util::dir_name(util::readlink("/proc/self/exe"));
std::string keras_runner = exe_dir + "/runners/keras_runner.py";
proc_pid = fork();
if (proc_pid == 0) {
// child: wire the pipes to stdin/stdout and exec the python runner
LOGD("spawning keras process %s", keras_runner.c_str());
char *argv[] = {(char*)keras_runner.c_str(), tmp, NULL};
dup2(pipein[0], 0);
dup2(pipeout[1], 1);
close(pipein[0]);
close(pipein[1]);
close(pipeout[0]);
close(pipeout[1]);
execvp(keras_runner.c_str(), argv);
}
// parent
close(pipein[0]);
close(pipeout[1]);
}
// Tear down the runner: release TF session/graph/status, close our ends
// of the subprocess pipes, and ask the keras child to exit.
// NOTE(review): like the constructor, this mixes the old in-process TF
// cleanup with the newer subprocess cleanup as rendered in the diff —
// confirm which lines are live. The child is signaled but never reaped
// (no waitpid), so it may linger as a zombie.
TFModel::~TFModel() {
TF_DeleteSession(session, status);
status_check();
TF_DeleteGraph(graph);
TF_DeleteStatus(status);
close(pipein[1]);
close(pipeout[0]);
kill(proc_pid, SIGTERM);
}
// Stream `size` floats into the keras subprocess's stdin pipe, looping
// until the kernel has accepted every byte (short writes are expected).
void TFModel::pwrite(float *buf, int size) {
  char *p = (char *)buf;
  int remaining = size*sizeof(float);
  while (remaining > 0) {
    int n = write(pipein[1], p, remaining);
    assert(n >= 0);
    p += n;
    remaining -= n;
  }
}
// Read exactly `size` floats of model output from the keras subprocess's
// stdout pipe, looping over short reads until the full payload arrives.
void TFModel::pread(float *buf, int size) {
  char *cbuf = (char *)buf;
  int tr = size*sizeof(float);
  while (tr > 0) {
    int err = read(pipeout[0], cbuf, tr);
    // err == 0 means EOF (the keras subprocess died). The previous
    // `err >= 0` check made no progress on EOF and spun this loop
    // forever; treat EOF as fatal instead.
    assert(err > 0);
    cbuf += err;
    tr -= err;
  }
}
// Register the recurrent-state input buffer; execute() streams it to the
// model every frame. Also resolves the "rnn_state" op in the TF graph —
// NOTE(review): the graph lookup looks like it belongs to the deleted
// in-process TF path in this diff; confirm which lines are live.
void TFModel::addRecurrent(float *state, int state_size) {
rnn_operation.oper = TF_GraphOperationByName(graph, "rnn_state");
rnn_operation.index = 0;
assert(rnn_operation.oper != NULL);
rnn_input_buf = state;
rnn_state_size = state_size;
}
// Register the desire-vector input buffer; execute() streams it to the
// model every frame. Also resolves the "desire" op in the TF graph —
// NOTE(review): the graph lookup looks like it belongs to the deleted
// in-process TF path in this diff; confirm which lines are live.
void TFModel::addDesire(float *state, int state_size) {
desire_operation.oper = TF_GraphOperationByName(graph, "desire");
desire_operation.index = 0;
assert(desire_operation.oper != NULL);
desire_input_buf = state;
desire_state_size = state_size;
}
void TFModel::execute(float *net_input_buf) {
TF_Tensor *input_tensor = allocate_tensor_for_output(input_operation, net_input_buf);
assert(input_tensor);
TF_Tensor *output_tensor = NULL;
if (rnn_input_buf == NULL) {
TF_SessionRun(session, NULL,
&input_operation, &input_tensor, 1,
&output_operation, &output_tensor, 1,
NULL, 0, NULL, status);
} else {
//printf("%f %f %f\n", net_input_buf[0], rnn_input_buf[0], desire_input_buf[0]);
TF_Tensor *rnn_tensor = allocate_tensor_for_output(rnn_operation, rnn_input_buf);
TF_Tensor *desire_tensor = allocate_tensor_for_output(desire_operation, desire_input_buf);
TF_Output io[] = {input_operation, rnn_operation, desire_operation};
TF_Tensor* id[] = {input_tensor, rnn_tensor, desire_tensor};
TF_SessionRun(session, NULL,
io, id, 3,
&output_operation, &output_tensor, 1,
NULL, 0, NULL, status);
TF_DeleteTensor(rnn_tensor);
TF_DeleteTensor(desire_tensor);
}
TF_DeleteTensor(input_tensor);
status_check();
assert(output_tensor);
memcpy((void*)output, TF_TensorData(output_tensor), output_size*sizeof(float));
TF_DeleteTensor(output_tensor);
// Run one inference via the keras subprocess: stream the frame input,
// desire vector, and recurrent state down the pipe, then block until the
// full output vector (`output_size` floats) comes back into `output`.
// NOTE(review): if addDesire/addRecurrent were never called, the buffers
// below are NULL and the sizes uninitialized — confirm callers always set
// both before execute().
void TFModel::execute(float *net_input_buf, int buf_size) {
// order must be this: the python side reads one flat concatenated input,
// so these writes presumably have to match the wrapper model's slice
// layout (input, desire, rnn state) — confirm against keras_runner.py
pwrite(net_input_buf, buf_size);
pwrite(desire_input_buf, desire_state_size);
pwrite(rnn_input_buf, rnn_state_size);
pread(output, output_size);
}

View File

@ -4,8 +4,6 @@
#include <stdlib.h>
#include "runmodel.h"
#include "tensorflow/c/c_api.h"
struct TFState;
class TFModel : public RunModel {
@ -14,25 +12,23 @@ public:
~TFModel();
void addRecurrent(float *state, int state_size);
void addDesire(float *state, int state_size);
void execute(float *net_input_buf);
void execute(float *net_input_buf, int buf_size);
private:
void status_check() const;
TF_Tensor *allocate_tensor_for_output(TF_Output out, float *dat);
int proc_pid;
float *output;
size_t output_size;
TF_Session* session;
TF_Graph* graph;
TF_Status* status;
TF_Output input_operation;
TF_Output rnn_operation;
TF_Output desire_operation;
TF_Output output_operation;
float *rnn_input_buf = NULL;
int rnn_state_size;
float *desire_input_buf = NULL;
int desire_state_size;
// pipe to communicate to keras subprocess
void pread(float *buf, int size);
void pwrite(float *buf, int size);
int pipein[2];
int pipeout[2];
};
#endif

View File

@ -1,3 +1,4 @@
CARLA_*.tar.gz
carla
carla_tmp

24
tools/sim/README 100644
View File

@ -0,0 +1,24 @@
Requires Ubuntu 16.04.
== Checkout openpilot ==
cd ~/
git clone https://github.com/commaai/openpilot.git
# Add export PYTHONPATH=$HOME/openpilot to your bashrc
# Have a working tensorflow+keras in python2
== Install (in tab 1) ==
cd ~/openpilot/tools/sim
./start_carla.sh # install CARLA 0.9.7 and start the server
== openpilot (in tab 2) ==
cd ~/openpilot/selfdrive/
PASSIVE=0 NOBOARD=1 ./manager.py
== bridge (in tab 3) ==
# links carla to openpilot, will "start the car" according to manager
cd ~/openpilot/tools/sim
./bridge.py

View File

@ -7,11 +7,16 @@ import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
from can import can_function, sendcan_function
from lib.can import can_function, sendcan_function
import queue
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--autopilot', action='store_true')
args = parser.parse_args()
pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
W,H = 1164, 874
@ -99,7 +104,9 @@ def go():
vehicle_bp = random.choice(blueprint_library.filter('vehicle.bmw.*'))
vehicle = world.spawn_actor(vehicle_bp, random.choice(world_map.get_spawn_points()))
#vehicle.set_autopilot(True)
if args.autopilot:
vehicle.set_autopilot(True)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
@ -123,7 +130,6 @@ def go():
print("done")
atexit.register(destroy)
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100)
@ -148,6 +154,7 @@ if __name__ == "__main__":
from selfdrive.version import terms_version, training_version
params.put("HasAcceptedTerms", terms_version)
params.put("CompletedTrainingVersion", training_version)
params.put("CommunityFeaturesToggle", "1")
threading.Thread(target=health_function).start()
threading.Thread(target=fake_driver_monitoring).start()

View File

@ -1,10 +0,0 @@
#!/bin/bash -e
FILE=CARLA_0.9.7.tar.gz
if [ ! -f $FILE ]; then
curl -O http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/$FILE
fi
mkdir -p carla
cd carla
tar xvf ../$FILE
easy_install PythonAPI/carla/dist/carla-0.9.7-py3.5-linux-x86_64.egg

View File

View File

@ -1,4 +0,0 @@
#!/bin/bash
cd carla
./CarlaUE4.sh

View File

@ -0,0 +1,19 @@
#!/bin/bash -e
# Download CARLA 0.9.7 (if not cached), unpack and install its Python egg
# into a staging dir so a failed extract can't leave a half-built ./carla,
# then start the simulator server.
FILE=CARLA_0.9.7.tar.gz
if [ ! -f "$FILE" ]; then
  # quote expansions so an unexpected value can't word-split the command
  curl -O "http://carla-assets-internal.s3.amazonaws.com/Releases/Linux/$FILE"
fi
if [ ! -d carla ]; then
  # stage into carla_tmp and rename at the end so ./carla is all-or-nothing
  rm -rf carla_tmp
  mkdir -p carla_tmp
  cd carla_tmp
  tar xvf "../$FILE"
  easy_install PythonAPI/carla/dist/carla-0.9.7-py3.5-linux-x86_64.egg
  cd ../
  mv carla_tmp carla
fi
cd carla
./CarlaUE4.sh