
Commit

Merge pull request #46 from jajimer/log
V1.0.0
jajimer authored Jun 9, 2021
2 parents 228ccb5 + 7ea92ac commit c7f656e
Showing 15 changed files with 640 additions and 132 deletions.
2 changes: 1 addition & 1 deletion doc/source/pages/output.rst
@@ -26,6 +26,6 @@ When a simulation is run, this generates a directory called `Eplus-env-<env_name>
- A copy of **socket.cfg** and **utilSocket.idf**, which are used by the communication interface with EnergyPlus during the simulation.
- **monitor.csv**: This records all Agent-Environment interactions during the episode, timestep by timestep. The format is: *timestep, observation_values, action_values, simulation_time (seconds), reward, done*.
- **output/**: This directory has EnergyPlus environment output.
- **progress.csv**: This file summarizes the general simulation results, with one row per episode recording the most relevant data. Currently, the format is: *episode,mean_reward,cumulative_reward,num_timestep,time_elapsed*.
- **progress.csv**: This file summarizes the general simulation results, with one row per episode recording the most relevant data. Currently, the format is: *episode_num,cumulative_reward,mean_reward,cumulative_power_consumption,mean_power_consumption,cumulative_comfort_penalty,mean_comfort_penalty,cumulative_power_penalty,mean_power_penalty,comfort_violation (%),length(timesteps),time_elapsed(seconds)*.

.. note:: For more information about EnergyPlus output, visit `EnergyPlus documentation <https://energyplus.net/documentation>`__.
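A minimal sketch (illustrative only, not part of this commit) of loading the new ``progress.csv`` columns with pandas; the directory name is a placeholder for the generated `Eplus-env-<env_name>` folder:

.. code-block:: python

    import pandas as pd

    # Placeholder path: point this at the generated Eplus-env-<env_name> directory.
    progress = pd.read_csv('Eplus-env-demo-v1-res1/progress.csv')

    # Inspect a few of the per-episode summary columns described above.
    print(progress[['episode_num', 'mean_reward', 'comfort_violation (%)']].head())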
31 changes: 23 additions & 8 deletions energym/envs/eplus_env.py
@@ -15,9 +15,10 @@

from opyplus import Epm, WeatherData

from ..utils.common import get_current_time_info, parse_variables, create_variable_weather, parse_observation_action_space, CSVLogger
from ..utils.common import get_current_time_info, parse_variables, create_variable_weather, parse_observation_action_space, setpoints_transform, CSVLogger
from ..simulators import EnergyPlus
from ..utils.rewards import SimpleReward
from pprint import pprint


class EplusEnv(gym.Env):
@@ -92,13 +93,24 @@ def __init__(

# Action space
self.flag_discrete = discrete_actions

# Discrete
if self.flag_discrete:
self.action_mapping = discrete_action_def
self.action_space = gym.spaces.Discrete(len(discrete_action_def))
# Continuous
else:
# Define the action setpoint ranges (one [low, up] pair per action variable)
self.action_setpoints = []
for i in range(len(self.variables['action'])):
# action_variable --> [low,up]
self.action_setpoints.append([
continuous_action_def[0][i], continuous_action_def[1][i]])

self.action_space = gym.spaces.Box(
low=np.array(continuous_action_def[0]),
high=np.array(continuous_action_def[1]),
# continuous_action_def[2] --> shape
low=np.repeat(-1, continuous_action_def[2][0]),
high=np.repeat(1, continuous_action_def[2][0]),
dtype=continuous_action_def[3]
)
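With this change the continuous action space is normalized to [-1, 1], while the real ranges are kept in self.action_setpoints. The sketch below is an assumption for illustration, not the helper committed in utils/common.py; it shows the kind of linear rescaling a function like setpoints_transform could apply:

import numpy as np

def rescale_action(action, action_space, action_setpoints):
    """Sketch: map a normalized gym Box action back to real setpoint values."""
    action = np.asarray(action, dtype=float)
    rescaled = []
    for value, (low, up), box_low, box_high in zip(
            action, action_setpoints, action_space.low, action_space.high):
        # Linear interpolation from [box_low, box_high] to [low, up].
        fraction = (value - box_low) / (box_high - box_low)
        rescaled.append(low + fraction * (up - low))
    return rescaled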

@@ -110,12 +122,12 @@ def __init__(
# Headers for csv loggers
monitor_header_list = ['timestep,month,day,hour']+self.variables['observation'] + \
self.variables['action']+['time (seconds)', 'reward',
'total_power_no_units', 'comfort_penalty', 'done']
'power_penalty', 'comfort_penalty', 'done']
self.monitor_header = ''
for element_header in monitor_header_list:
self.monitor_header += element_header+','
self.monitor_header = self.monitor_header[:-1]
self.progress_header = 'episode,cumulative_reward,mean_reward,mean_power_consumption,comfort_violation (%),num_timesteps,time_elapsed'
self.progress_header = 'episode_num,cumulative_reward,mean_reward,cumulative_power_consumption,mean_power_consumption,cumulative_comfort_penalty,mean_comfort_penalty,cumulative_power_penalty,mean_power_penalty,comfort_violation (%),length(timesteps),time_elapsed(seconds)'

# Create simulation logger, by default is active (flag=True)
self.logger = CSVLogger(monitor_header=self.monitor_header, progress_header=self.progress_header,
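For reference, a tiny sketch of the same comma-separated monitor header built with str.join; the variable names below are placeholders standing in for self.variables, not values taken from this commit:

# Placeholder observation/action names, standing in for self.variables.
variables = {'observation': ['Zone Air Temperature (SPACE1-1)'],
             'action': ['Heating_Setpoint_RL', 'Cooling_Setpoint_RL']}
monitor_header_list = ['timestep,month,day,hour'] + variables['observation'] + \
    variables['action'] + ['time (seconds)', 'reward',
                           'power_penalty', 'comfort_penalty', 'done']
# Equivalent to the explicit concatenation loop above.
monitor_header = ','.join(monitor_header_list)
print(monitor_header)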
@@ -150,10 +162,12 @@ def step(self, action):
else:
setpoints = action
else:
print("ERROR: ", action)
print("ERROR: ", type(action))
action_ = list(setpoints)
else:
action_ = list(action)
# Transform the normalized action into simulation setpoints
action_ = setpoints_transform(
action, self.action_space, self.action_setpoints)

# Send action to the simulator
self.simulator.logger_main.debug(action_)
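A hypothetical end-to-end usage sketch of the new continuous-action path; the environment id and the assumption that importing the package registers the environments are illustrative, not taken from this commit:

import gym
import energym  # assumed to register the Eplus environments on import

env = gym.make('Eplus-demo-v1')      # placeholder environment id
obs = env.reset()
action = env.action_space.sample()   # normalized continuous action in [-1, 1]
obs, reward, done, info = env.step(action)
print(info['action_'])               # real setpoints sent to the simulator (exposed via info in this commit)
env.close()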
@@ -190,7 +204,8 @@ def step(self, action):
'total_power_no_units': terms['reward_energy'],
'comfort_penalty': terms['reward_comfort'],
'temperatures': temp_values,
'out_temperature': obs_dict['Site Outdoor Air Drybulb Temperature (Environment)']
'out_temperature': obs_dict['Site Outdoor Air Drybulb Temperature (Environment)'],
'action_': action_
}

# Record action and new observation in simulator's csv