Skip to content

Commit

Permalink
Merge pull request #1078 from isi-vista/1073-feature-overlay
Browse files Browse the repository at this point in the history
Introduce more complete feature overlay and Experiment Phase 3 prep
  • Loading branch information
lichtefeld authored Jan 10, 2022
2 parents 43542c4 + dce3e0d commit 13da28e
Show file tree
Hide file tree
Showing 54 changed files with 1,539 additions and 572 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ default:
@echo "an explicit target is required"

# easier to test python2 vs. python3
PYTHON=pypy3
PYTHON=python3

SHELL=bash
SOURCE_DIR_NAME=adam
Expand Down
2 changes: 1 addition & 1 deletion adam/curriculum/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def instances(
Tuple[SituationT, LinguisticDescriptionT, PerceptualRepresentation[PerceptionT]]
]:
for (linguistic_description, perception) in self._instances:
yield (None, linguistic_description, perception) # type: ignore
yield None, linguistic_description, perception # type: ignore


@attrs(frozen=True, slots=True)
Expand Down
120 changes: 120 additions & 0 deletions adam/curriculum/curriculum_from_files.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
from typing import Tuple, MutableSequence, Sequence

import yaml
from vistautils.parameters import Parameters

from adam.curriculum import InstanceGroup, ExplicitWithSituationInstanceGroup
from adam.language import TokenSequenceLinguisticDescription
from adam.language.language_generator import (
LanguageGenerator,
InSituationLanguageGenerator,
)
from adam.paths import (
TRAINING_CURRICULUM_DIR,
TESTING_CURRICULUM_DIR,
CURRICULUM_INFO_FILE,
SITUATION_DIR_NAME,
SITUATION_DESCRIPTION_FILE,
FEATURE_YAML,
)
from adam.perception.visual_perception import (
VisualPerceptionFrame,
VisualPerceptionRepresentation,
)
from adam.situation.phase_3_situations import SimulationSituation

PHASE_3_TRAINING_CURRICULUM_OPTIONS = [
"m4_core",
"m4_stretch",
]

PHASE_3_TESTING_CURRICULUM_OPTIONS = ["m4_core_eval", "m4_stretch_eval"]

TRAINING_CUR = "training"
TESTING_CUR = "testing"


def phase3_load_from_disk(  # pylint: disable=unused-argument
    num_samples: int,
    num_noise_objects: int,
    language_generator: LanguageGenerator[
        SimulationSituation, TokenSequenceLinguisticDescription
    ] = InSituationLanguageGenerator,  # type: ignore
    *,
    params: Parameters = Parameters.empty(),
) -> Sequence[
    InstanceGroup[
        SimulationSituation,
        TokenSequenceLinguisticDescription,
        VisualPerceptionFrame,
    ]
]:
    """Load a Phase 3 curriculum from files previously written to disk.

    The curriculum to load is selected via `params`:

    * ``curriculum_type`` -- ``"training"`` or ``"testing"`` (default ``"training"``),
      which chooses the root directory to read from.
    * ``curriculum`` -- the curriculum name; must be one of the known training or
      testing curriculum options for the chosen type.
    * ``color_is_rgb`` -- boolean passed through to perception-frame parsing
      (default ``False``).

    ``num_samples``, ``num_noise_objects``, and ``language_generator`` are accepted
    only for signature compatibility with other curriculum factories and are unused.

    Returns a single-element sequence containing one instance group with every
    situation found in the curriculum directory.

    Raises ``RuntimeError`` if the requested curriculum directory does not exist.
    """
    curriculum_type = params.string(
        "curriculum_type", valid_options=[TRAINING_CUR, TESTING_CUR], default=TRAINING_CUR
    )
    curriculum_to_load = params.string(
        "curriculum",
        valid_options=PHASE_3_TRAINING_CURRICULUM_OPTIONS
        if curriculum_type == TRAINING_CUR
        else PHASE_3_TESTING_CURRICULUM_OPTIONS,
    )

    if curriculum_type == TRAINING_CUR:
        root_dir = TRAINING_CURRICULUM_DIR
    else:
        root_dir = TESTING_CURRICULUM_DIR

    curriculum_dir = root_dir / curriculum_to_load

    if not curriculum_dir.exists():
        raise RuntimeError(
            f"Curriculum to load does not exist! Tried to load {curriculum_dir}"
        )

    # NOTE: fixed typo "utf=8" -> "utf-8" (the old spelling only worked because
    # Python's codec-name normalization maps "utf=8" to "utf_8").
    with open(
        curriculum_dir / CURRICULUM_INFO_FILE, encoding="utf-8"
    ) as curriculum_info_yaml:
        curriculum_params = yaml.safe_load(curriculum_info_yaml)

    instances: MutableSequence[
        Tuple[
            SimulationSituation,
            TokenSequenceLinguisticDescription,
            VisualPerceptionRepresentation,
        ]
    ] = []
    for situation_num in range(curriculum_params["num_dirs"]):
        situation_dir = curriculum_dir / SITUATION_DIR_NAME.format(num=situation_num)
        # Gold language is only available for training curricula; for testing the
        # description is left empty.
        language_tuple: Tuple[str, ...] = tuple()
        if curriculum_type == TRAINING_CUR:
            with open(
                situation_dir / SITUATION_DESCRIPTION_FILE, encoding="utf-8"
            ) as situation_description_file:
                situation_description = yaml.safe_load(situation_description_file)
            language_tuple = tuple(situation_description["language"].split(" "))
        # Collect the on-disk artifacts for this situation; sorted() keeps the
        # frame/file ordering deterministic across platforms.
        situation = SimulationSituation(
            language=language_tuple,
            scene_images_png=sorted(situation_dir.glob("rgb_*")),
            scene_point_cloud=tuple(situation_dir.glob("pdc_rgb_*")),
            depth_pngs=sorted(situation_dir.glob("depth_*")),
            pdc_semantic_plys=sorted(situation_dir.glob("pdc_semantic_*")),
            semantic_pngs=sorted(situation_dir.glob("semantic_*")),
            features=sorted(situation_dir.glob("feature_*")),
            strokes=sorted(situation_dir.glob("stroke_[0-9]*_[0-9]*.png")),
            stroke_graphs=sorted(situation_dir.glob("stroke_graph_*")),
        )
        language = TokenSequenceLinguisticDescription(tokens=language_tuple)
        perception = VisualPerceptionRepresentation.single_frame(
            VisualPerceptionFrame.from_yaml(
                situation_dir / FEATURE_YAML,
                color_is_rgb=params.boolean("color_is_rgb", default=False),
            )
        )
        instances.append((situation, language, perception))  # type: ignore

    return [
        ExplicitWithSituationInstanceGroup(  # type: ignore
            name=curriculum_to_load,
            instances=tuple(instances),
        )
    ]
4 changes: 3 additions & 1 deletion adam/curriculum/m6_curriculum.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from typing import Sequence, List, Optional

from more_itertools import flatten
from vistautils.parameters import Parameters

from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from adam.language.language_generator import LanguageGenerator
Expand Down Expand Up @@ -368,12 +369,13 @@ def instantiate_subcurricula(
]


def make_m6_curriculum(
def make_m6_curriculum( # pylint: disable=unused-argument
num_samples: Optional[int],
num_noise_objects: Optional[int],
language_generator: LanguageGenerator[
HighLevelSemanticsSituation, LinearizedDependencyTree
],
params: Parameters = Parameters.empty(),
) -> Sequence[Phase1InstanceGroup]:
return instantiate_subcurricula(
M6_SUBCURRICULUM_GENERATORS, num_samples, num_noise_objects, language_generator
Expand Down
12 changes: 9 additions & 3 deletions adam/experiment/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
HTMLLoggerPreObserver,
)
from adam.language import LinguisticDescriptionT
from adam.perception.perception_graph import GraphLogger
from adam.situation import SituationT
from adam.learner import TopLevelLanguageLearner, LearningExample
from adam.perception import PerceptionT
Expand Down Expand Up @@ -185,6 +186,7 @@ def execute_experiment(
debug_learner_pickling: bool = False,
starting_point: int = 0,
point_to_log: int = 0,
perception_graph_logger: Optional[GraphLogger] = None,
) -> None:
"""
Runs an `Experiment`.
Expand Down Expand Up @@ -413,7 +415,7 @@ def execute_experiment(
)

if experiment.pre_example_training_observers:
learner_descriptions_before_seeing_example = learner.describe(
scene_description_before_seeing_example = learner.describe(
perceptual_representation
)
if situation:
Expand All @@ -422,7 +424,7 @@ def execute_experiment(
situation,
linguistic_description,
perceptual_representation,
learner_descriptions_before_seeing_example,
scene_description_before_seeing_example,
)
pre_example_observer.report()
else:
Expand All @@ -433,6 +435,7 @@ def execute_experiment(
learner.observe(
LearningExample(perceptual_representation, linguistic_description),
offset=starting_point,
debug_perception_graph_logger=perception_graph_logger,
)

if experiment.post_example_training_observers:
Expand Down Expand Up @@ -499,7 +502,10 @@ def execute_experiment(
) in test_instance_group.instances():
logging.info(f"Test Description: {num_test_observations}")
num_test_observations += 1
descriptions_from_learner = learner.describe(test_instance_perception)
descriptions_from_learner = learner.describe(
test_instance_perception,
debug_perception_graph_logger=perception_graph_logger,
)
for test_observer in experiment.test_observers:
test_observer.observe(
situation,
Expand Down
52 changes: 0 additions & 52 deletions adam/experiment/curriculum_repository.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,24 +10,17 @@
include all parameters other than the ones that have been specifically ignored. The user can specify
additional ignored parameters as appropriate. Unrecognized parameters are an error.
"""
import json
import os
import pickle
from pathlib import Path
from typing import Tuple, AbstractSet

import yaml
from attr import attrs, attrib
from immutablecollections import immutableset, ImmutableSet
from immutablecollections.converter_utils import _to_tuple
from vistautils.parameters import Parameters

from adam.curriculum import ExplicitWithoutSituationInstanceGroup
from adam.curriculum.curriculum_utils import Phase1InstanceGroup
from adam.language import TokenSequenceLinguisticDescription
from adam.learner.language_mode import LanguageMode, LANGUAGE_MODE_TO_NAME
from adam.perception import PerceptualRepresentation
from adam.perception.visual_perception import VisualPerceptionFrame
from adam.pickle import AdamPickler, AdamUnpickler

_PARAMETER_ORDER: ImmutableSet[str] = immutableset(
Expand Down Expand Up @@ -164,51 +157,6 @@ def _build_curriculum_path(
return curriculum_file_path / _EXPERIMENT_CURRICULUM_FILE_NAME


def read_p3_experiment_curriculum(
    repository: Path, parameters: Parameters
) -> ExperimentCurriculum:
    """Read a Phase 3 experiment curriculum from a directory of situation files.

    Every subdirectory of ``repository`` whose name contains ``"situation"`` is
    read (in sorted order): each ``perception*`` file inside it is parsed as a
    JSON perception frame, and the ``description.yaml`` file supplies the
    utterance, which is tokenized on whitespace.

    The result is an `ExperimentCurriculum` whose training portion is a single
    instance group named after the ``experiment`` parameter and whose test
    portion is empty.
    """
    # List of tuples of LinguisticDescription and Perception
    all_instances = []
    # Load yaml and jsons in directory
    for situation_dir in [
        entry for entry in sorted(os.listdir(repository)) if "situation" in entry
    ]:
        perception_frames = []
        # Load perception file(s); sorted order keeps multi-frame sequences stable.
        for perception_file_name in [
            entry
            for entry in sorted(os.listdir(repository / situation_dir))
            if "perception" in entry
        ]:
            with open(
                repository / situation_dir / perception_file_name,
                "r",
                encoding="utf-8",
            ) as perception_file:
                perception_json = json.load(perception_file)
            perception_frames.append(VisualPerceptionFrame.from_json(perception_json))
        # Load description file.
        # NOTE: switched from bare yaml.load (deprecated, and a TypeError on
        # PyYAML >= 6.0 without an explicit Loader) to yaml.safe_load, matching
        # every other YAML read in this module.
        with open(
            repository / situation_dir / "description.yaml", "r", encoding="utf-8"
        ) as description_file:
            description_yaml = yaml.safe_load(description_file)
        utterance = description_yaml["language"]

        linguistic_description = TokenSequenceLinguisticDescription(utterance.split())
        perceptual_representation = PerceptualRepresentation[VisualPerceptionFrame](
            perception_frames
        )
        all_instances.append((linguistic_description, perceptual_representation))
    # Convert them to InstanceGroups
    train_curriculum: Tuple[Phase1InstanceGroup, ...] = tuple(
        [
            ExplicitWithoutSituationInstanceGroup(
                parameters.string("experiment"), tuple(all_instances)
            )
        ]
    )
    test_curriculum: Tuple[Phase1InstanceGroup, ...] = tuple()
    return ExperimentCurriculum(train_curriculum, test_curriculum)


def read_experiment_curriculum(
repository: Path,
parameters: Parameters,
Expand Down
Loading

0 comments on commit 13da28e

Please sign in to comment.