-
Notifications
You must be signed in to change notification settings - Fork 19
/
Copy pathcarla_env.py
56 lines (39 loc) · 1.79 KB
/
carla_env.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/usr/bin/env python
# Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from __future__ import print_function
import gym
from rllib_integration.carla_core import CarlaCore
class CarlaEnv(gym.Env):
    """
    Gym environment wrapping CARLA.

    Simulation management is delegated to a CarlaCore instance, while all
    experiment-specific logic (action/observation spaces, rewards, done
    conditions) is delegated to the experiment object built from the config.
    """

    def __init__(self, config):
        """Builds the experiment and the CARLA core, then performs an initial reset."""
        self.config = config

        # The experiment class is provided in the config and is instantiated
        # with its own sub-configuration.
        experiment_config = self.config["experiment"]
        self.experiment = experiment_config["type"](experiment_config)
        self.action_space = self.experiment.get_action_space()
        self.observation_space = self.experiment.get_observation_space()

        self.core = CarlaCore(self.config['carla'])
        self.core.setup_experiment(self.experiment.config)

        self.reset()

    def reset(self):
        """Respawns the hero, resets the experiment and returns the first observation."""
        self.hero = self.core.reset_hero(self.experiment.config["hero"])
        self.experiment.reset()

        # Advance the simulation one tick with no control applied so that the
        # sensors produce the initial readings.
        sensor_readings = self.core.tick(None)
        observation, _ = self.experiment.get_observation(sensor_readings)
        return observation

    def step(self, action):
        """Applies *action* for one simulation tick.

        Returns the (observation, reward, done, info) tuple for the new state.
        """
        control = self.experiment.compute_action(action)
        sensor_readings = self.core.tick(control)

        observation, info = self.experiment.get_observation(sensor_readings)
        done = self.experiment.get_done_status(observation, self.core)
        reward = self.experiment.compute_reward(observation, self.core)
        return observation, reward, done, info