drive_rover.py
# Do the necessary imports
import argparse
import shutil
import base64
from datetime import datetime
import os
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO, StringIO
import json
import pickle
import matplotlib.image as mpimg
import time
# Import functions for perception and decision making
from perception import perception_step
from decision import decision_step
from supporting_functions import update_rover, create_output_images
# Initialize socketio server and Flask application
# (learn more at: https://python-socketio.readthedocs.io/en/latest/)
sio = socketio.Server()
app = Flask(__name__)
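# The simulator acts as the socketio client: it connects to this server and
# streams 'telemetry' events (a camera frame plus rover state), and the
# handlers below reply with drive or pickup commands.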
# Read in ground truth map and create 3-channel green version for overplotting
# NOTE: images are read in by default with the origin (0, 0) in the upper left
# and y-axis increasing downward.
ground_truth = mpimg.imread('../calibration_images/map_bw.png')
# This next line creates arrays of zeros in the red and blue channels
# and puts the map into the green channel. This is why the underlying
# map output looks green in the display image
ground_truth_3d = np.dstack((ground_truth*0, ground_truth*255, ground_truth*0)).astype(float)
# Define RoverState() class to retain rover state parameters
class RoverState():
    def __init__(self):
        self.start_time = None  # To record the start time of navigation
        self.total_time = None  # To record total duration of navigation
        self.img = None  # Current camera image
        self.pos = None  # Current position (x, y)
        self.yaw = None  # Current yaw angle
        self.pitch = None  # Current pitch angle
        self.roll = None  # Current roll angle
        self.vel = None  # Current velocity
        self.steer = 0  # Current steering angle
        self.throttle = 0  # Current throttle value
        self.brake = 0  # Current brake value
        self.nav_angles = None  # Angles of navigable terrain pixels
        self.nav_dists = None  # Distances of navigable terrain pixels
        self.ground_truth = ground_truth_3d  # Ground truth worldmap
        self.mode = 'forward'  # Current mode (can be 'forward' or 'stop')
        self.throttle_set = 0.2  # Throttle setting when accelerating
        self.brake_set = 10  # Brake setting when braking
        # The stop_forward and go_forward fields below represent total counts
        # of navigable terrain pixels. This is a very crude way of knowing
        # when you can keep going and when you should stop. Feel free to
        # get creative in adding new fields or modifying these!
        self.stop_forward = 50  # Threshold to initiate stopping
        self.go_forward = 500  # Threshold to go forward again
        self.max_vel = 2  # Maximum velocity (meters/second)
        # Image output from the perception step
        # Update this image to display your intermediate analysis steps
        # on screen in autonomous mode
        self.vision_image = np.zeros((160, 320, 3), dtype=float)
        # Worldmap
        # Update this image with the positions of navigable terrain,
        # obstacles and rock samples
        self.worldmap = np.zeros((200, 200, 3), dtype=float)
        self.samples_pos = None  # To store the actual sample positions
        self.samples_to_find = 0  # To store the initial count of samples
        self.samples_located = 0  # To store the number of samples located on the map
        self.samples_collected = 0  # To count the number of samples collected
        self.near_sample = 0  # Will be set to telemetry value data["near_sample"]
        self.picking_up = 0  # Will be set to telemetry value data["picking_up"]
        self.send_pickup = False  # Set to True to trigger rock pickup
# Initialize our rover
Rover = RoverState()
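# Rover is a module-level singleton: the telemetry handler updates it in
# place, so state (mode, worldmap, sample counts) persists across frames.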
# Variables to track frames per second (FPS)
# Initialize frame counter
frame_counter = 0
# Initialize second counter
second_counter = time.time()
fps = None
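# Each 'telemetry' event carries one camera frame plus the rover's state;
# the handler below runs perception and decision, then replies with exactly
# one message (drive commands or a pickup request) per frame.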
# Define telemetry function for what to do with incoming data
@sio.on('telemetry')
def telemetry(sid, data):
    global frame_counter, second_counter, fps
    frame_counter += 1
    # Do a rough calculation of frames per second (FPS)
    if (time.time() - second_counter) > 1:
        fps = frame_counter
        frame_counter = 0
        second_counter = time.time()
    print("Current FPS: {}".format(fps))
    if data:
        global Rover
        # Initialize / update Rover with current telemetry
        Rover, image = update_rover(Rover, data)
        if np.isfinite(Rover.vel):
            # Execute the perception and decision steps to update the Rover's state
            Rover = perception_step(Rover)
            Rover = decision_step(Rover)
            # Create output images to send to the server
            out_image_string1, out_image_string2 = create_output_images(Rover)
            # The action step! Send commands to the rover!
            # Don't send both of these; they both trigger the simulator
            # to send back new telemetry, so we must send only one
            # in response to the current telemetry data.
            # If in a state where we want to pick up a rock, send the pickup command
            if Rover.send_pickup and not Rover.picking_up:
                send_pickup()
                # Reset Rover flags
                Rover.send_pickup = False
            else:
                # Send drive commands to the rover
                commands = (Rover.throttle, Rover.brake, Rover.steer)
                send_control(commands, out_image_string1, out_image_string2)
        # In case of invalid telemetry, send null commands
        else:
            # Send zeros for throttle, brake and steer, and empty images
            send_control((0, 0, 0), '', '')
        # If you want to save camera images from autonomous driving, specify a path
        # Example: $ python drive_rover.py image_folder_path
        # Conditional to save image frame if a folder was specified
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
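    # An empty telemetry payload means the simulator is in manual (training)
    # mode, so just acknowledge with a 'manual' event.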
    else:
        sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
    print("connect ", sid)
    send_control((0, 0, 0), '', '')
    sample_data = {}
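    # Request the rock sample positions from the simulator; the reply arrives
    # with subsequent telemetry and is recorded by update_rover
    # (see samples_pos and samples_to_find in RoverState).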
    sio.emit(
        "get_samples",
        sample_data,
        skip_sid=True)
def send_control(commands, image_string1, image_string2):
    # Define commands to be sent to the rover
    data = {
        'throttle': str(commands[0]),
        'brake': str(commands[1]),
        'steering_angle': str(commands[2]),
        'inset_image1': image_string1,
        'inset_image2': image_string2,
    }
    # Send commands via socketIO server
    sio.emit(
        "data",
        data,
        skip_sid=True)
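    # Yield to the eventlet event loop so the message above is actually
    # flushed to the simulator before the handler continues.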
    eventlet.sleep(0)
# Define a function to send the "pickup" command
def send_pickup():
    print("Picking up")
    pickup = {}
    sio.emit(
        "pickup",
        pickup,
        skip_sid=True)
    eventlet.sleep(0)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()
    #os.system('rm -rf IMG_stream/*')
    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("Recording this run ...")
    else:
        print("NOT recording this run ...")
    # Wrap the Flask application with socketio's middleware
    app = socketio.Middleware(sio, app)
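    # NOTE: the simulator is assumed to connect on port 4567, so the port
    # below must match what the simulator expects.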
    # Deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)