Merge pull request #73 from UoA-CARES/master
Update branch
emilysteiner71 authored Jul 16, 2023
2 parents 9d0fd02 + bb333ae commit 223d19a
Showing 10 changed files with 222 additions and 195 deletions.
2 changes: 1 addition & 1 deletion src/environments/environments/CarTrackEnvironment.py
@@ -160,7 +160,7 @@ def compute_reward(self, state, next_state):
         self.step_counter = 0
         self.update_goal_service(self.goal_number)

-        if has_collided(next_state[8:-2], self.COLLISION_RANGE) or has_flipped_over(next_state[2:6]):
+        if has_collided(next_state[8:], self.COLLISION_RANGE) or has_flipped_over(next_state[2:6]):
             reward -= 25 # TODO: find optimal value for this

         return reward
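
This fix widens the collision check to the full lidar slice: the state vector evidently stores the beam ranges from index 8 onward, and the old slice next_state[8:-2] silently dropped the last two beams. A minimal sketch of the kind of threshold check being fed here, assuming has_collided simply compares each beam against COLLISION_RANGE (the helper body is illustrative, not copied from the repo's util.py):

import numpy as np

def has_collided(lidar_ranges, collision_range):
    # Flag a collision when any beam reports an obstacle inside the threshold.
    return bool(np.any(np.asarray(lidar_ranges) < collision_range))

# Before: has_collided(next_state[8:-2], ...) ignored the last two beams.
# After:  has_collided(next_state[8:], ...)  checks every beam.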
2 changes: 1 addition & 1 deletion src/environments/environments/F1tenthEnvironment.py
@@ -30,7 +30,7 @@ def __init__(self, env_name, car_name, max_steps, step_length):
         self.STEP_LENGTH = step_length

         self.MAX_ACTIONS = np.asarray([3, 3.14])
-        self.MIN_ACTIONS = np.asarray([-0.5, -3.14])
+        self.MIN_ACTIONS = np.asarray([0, -3.14])

         self.ACTION_NUM = 2
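
The two action dimensions appear to be a speed and an angle (the ±3.14 bound is ≈ ±π), so raising the speed floor from -0.5 to 0 stops the policy from commanding the car in reverse. An illustrative sketch of how a tanh-bounded policy output in [-1, 1] would map onto these bounds (the rescaling helper is an assumption, not code from this repo):

import numpy as np

MAX_ACTIONS = np.asarray([3, 3.14])
MIN_ACTIONS = np.asarray([0, -3.14])

def denormalise_action(raw_action):
    # Linearly rescale each dimension from [-1, 1] to [MIN, MAX].
    raw_action = np.asarray(raw_action)
    return MIN_ACTIONS + (raw_action + 1.0) * (MAX_ACTIONS - MIN_ACTIONS) / 2.0

# Under the old floor, raw_action = [-1, 0] decoded to a -0.5 m/s reverse
# command; with the new floor the same output decodes to a standstill.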

2 changes: 1 addition & 1 deletion src/environments/environments/util.py
@@ -68,7 +68,7 @@ def avg_reduce_lidar(lidar: LaserScan):

 def reduce_lidar(lidar: LaserScan):
     ranges = lidar.ranges
-    ranges = np.nan_to_num(ranges, posinf=float(-1), neginf=float(-1))
+    ranges = np.nan_to_num(ranges, posinf=float(10), neginf=float(0))
     ranges = list(ranges)

     reduced_range = []
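
The old call mapped both +inf and -inf lidar readings to -1, a value that sits below any plausible COLLISION_RANGE, so beams that saw nothing could register as immediate obstacles downstream. Mapping +inf to 10 treats out-of-range beams as far away and -inf to 0 as contact. A sketch of how the fixed function plausibly continues (only the nan_to_num line comes from this diff; the sector reduction below is assumed for illustration):

import numpy as np
from sensor_msgs.msg import LaserScan

def reduce_lidar(lidar: LaserScan, num_sectors: int = 10):
    ranges = np.nan_to_num(lidar.ranges, posinf=float(10), neginf=float(0))
    ranges = list(ranges)

    reduced_range = []
    sector = len(ranges) // num_sectors
    for i in range(num_sectors):
        # Keep the closest beam per sector so nearby obstacles are never averaged away.
        reduced_range.append(min(ranges[i * sector:(i + 1) * sector]))
    return reduced_range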
2 changes: 1 addition & 1 deletion src/f1tenth
7 changes: 4 additions & 3 deletions src/reinforcement_learning/config/test.yaml
@@ -1,9 +1,10 @@
 test:
   ros__parameters:
-    environment: 'CarTrack1'
+    environment: 'CarTrack'
+    track: 'track_2'
     max_steps_evaluation: 1000000
-    actor_path: models/23_07_05_03:52:58/models/actor_checkpoint.pht
-    critic_path: models/23_07_05_03:52:58/models/critic_checkpoint.pht
+    actor_path: rl_logs/23_07_06_01:46:43/models/actor_checkpoint.pht
+    critic_path: rl_logs/23_07_06_01:46:43/models/critic_checkpoint.pht
     max_steps: 300
     step_length: 0.25
     reward_range: 2.0
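
This config change folds the per-track environments into one: 'CarTrack1' is no longer an environment name; instead the environment is 'CarTrack' and the new track key selects the circuit. The launch files read these values straight from the YAML, along the lines of (path literal illustrative; the access pattern matches the launch diffs below):

import yaml

config = yaml.load(open('config/test.yaml'), Loader=yaml.Loader)
params = config['test']['ros__parameters']
env = params['environment']   # 'CarTrack'
track = params['track']       # 'track_2'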
31 changes: 22 additions & 9 deletions src/reinforcement_learning/launch/sanity_check.launch.py
@@ -1,20 +1,37 @@
 import os
 from ament_index_python import get_package_share_directory
-from launch_ros.actions import Node
+from launch_ros.actions import Node, SetParameter
 from launch import LaunchDescription
 from launch.launch_description_sources import PythonLaunchDescriptionSource
 from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable
+from launch.substitutions import TextSubstitution
+import yaml

+env_launch = {
+    'CarGoal': 'cargoal',
+    'CarWall': 'carwall',
+    'CarBlock': 'carblock',
+    'CarTrack': 'cartrack'
+}

 def generate_launch_description():
     pkg_f1tenth_description = get_package_share_directory('f1tenth_description')
     pkg_f1tenth_bringup = get_package_share_directory('f1tenth_bringup')
     pkg_environments = get_package_share_directory('environments')

+    config_path = os.path.join(
+        get_package_share_directory('reinforcement_learning'),
+        'train.yaml'
+    )
+
+    config = yaml.load(open(config_path), Loader=yaml.Loader)
+    env = config['train']['ros__parameters']['environment']
+
     environment = IncludeLaunchDescription(
         launch_description_source=PythonLaunchDescriptionSource(
-            os.path.join(pkg_environments, 'cargoal.launch.py')),
+            os.path.join(pkg_environments, f'{env_launch[env]}.launch.py')),
         launch_arguments={
             'car_name': 'f1tenth',
+            'track': TextSubstitution(text=str(config['train']['ros__parameters']['track'])),
         }.items() #TODO: this doesn't do anything
     )

@@ -27,17 +44,12 @@ def generate_launch_description():
         }.items()
     )

-    config = os.path.join(
-        get_package_share_directory('reinforcement_learning'),
-        'train.yaml'
-    )
-
     # Launch the Environment
     main = Node(
         package='reinforcement_learning',
         executable='sanity_check',
         parameters=[
-            config
+            config_path
         ],
         name='sanity_check',
         output='screen',
@@ -47,6 +59,7 @@ def generate_launch_description():
     return LaunchDescription([
         #TODO: Find a way to remove this
         SetEnvironmentVariable(name='GZ_SIM_RESOURCE_PATH', value=pkg_f1tenth_description[:-19]),
+        SetParameter(name='use_sim_time', value=True),
         environment,
         f1tenth,
         main
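
Two changes work together here: config_path is now computed once, up front, so the same path feeds both yaml.load and the node's parameter list, and the new SetParameter action at the top of the LaunchDescription applies use_sim_time to every node launched after it. A minimal standalone sketch of that scoping behaviour (the demo node is illustrative):

from launch import LaunchDescription
from launch_ros.actions import Node, SetParameter

def generate_launch_description():
    return LaunchDescription([
        # Sets 'use_sim_time' for all nodes below, so the simulated /clock drives ROS time.
        SetParameter(name='use_sim_time', value=True),
        Node(package='demo_nodes_cpp', executable='talker'),
    ])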
7 changes: 3 additions & 4 deletions src/reinforcement_learning/launch/test.launch.py
@@ -4,15 +4,14 @@
 from launch import LaunchDescription
 from launch.launch_description_sources import PythonLaunchDescriptionSource
 from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable
+from launch.substitutions import TextSubstitution
 import yaml

 env_launch = {
     'CarGoal': 'cargoal',
     'CarWall': 'carwall',
     'CarBlock': 'carblock',
-    'CarTrack': 'cartrack',
-    'CarTrack1': 'cartrack1',
-    'CarTrack2': 'cartrack2'
+    'CarTrack': 'cartrack'
 }

def generate_launch_description():
@@ -32,7 +31,7 @@ def generate_launch_description():
     environment = IncludeLaunchDescription(
         launch_description_source=PythonLaunchDescriptionSource(
             os.path.join(pkg_environments, f'{env_launch[env]}.launch.py')),
         launch_arguments={
-            'car_name': 'f1tenth',
+            'track': TextSubstitution(text=str(config['test']['ros__parameters']['track'])),
         }.items() #TODO: this doesn't do anything
     )
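
The '#TODO: this doesn't do anything' comment reflects how launch_arguments works: the values only set launch configurations inside the included description, so nothing changes unless that file actually reads them. A sketch of what cartrack.launch.py would need for the 'track' argument to take effect (assumed, not shown in this PR):

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration

def generate_launch_description():
    return LaunchDescription([
        # Declares the argument with a default; nodes below can then
        # consume it via LaunchConfiguration('track').
        DeclareLaunchArgument('track', default_value='track_1'),
    ])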
