Feature camera stream #22

Merged
4 commits merged on Feb 10, 2025
5,571 changes: 4,795 additions & 776 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -25,6 +25,7 @@ serde_json = "1.0"
thiserror = "1.0"
toml = "0.8.19"
zstd = "0.13.2"
slint = "1.9.1"

[build-dependencies]
bindgen = "0.70.1"
7 changes: 7 additions & 0 deletions examples/camera_to_aedat4.py
@@ -0,0 +1,7 @@
import faery

(
faery.events_stream_from_camera("Inivation") # Open an Inivation camera
.crop(10, 110, 10, 110) # Remove events outside the region (10, 10) to (110, 110)
.to_file("some.aedat4") # Save the events to a file
)
2 changes: 1 addition & 1 deletion flake.nix
@@ -12,7 +12,7 @@
pkgs = import nixpkgs { inherit system; };
faery =
let
pypkgs = pkgs.python313Packages;
pypkgs = pkgs.python312Packages;
in
pkgs.mkShell {
buildInputs = [
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -24,7 +24,7 @@ repository = "https://github.com/aestream/faery/"
documentation = "https://github.com/aestream/faery/"

[build-system]
requires = ["maturin==1.7.4"]
requires = ["maturin==1.8.0"]
build-backend = "maturin"

[tool.maturin]
3 changes: 3 additions & 0 deletions python/faery/__init__.py
@@ -146,6 +146,8 @@ def name_to_colormaps() -> dict[str, Colormap]:
)
}

from .event_camera_input import events_stream_from_camera


__all__ = [
"__version__",
@@ -181,6 +183,7 @@ def name_to_colormaps() -> dict[str, Colormap]:
"VideoFileTune",
"VideoFileType",
"events_stream_from_array",
"events_stream_from_camera",
"events_stream_from_file",
"events_stream_from_stdin",
"events_stream_from_udp",
12 changes: 12 additions & 0 deletions python/faery/cli/process.py
@@ -78,6 +78,7 @@ def add_csv_properties(parser: argparse.ArgumentParser):
def input_parser() -> argparse.ArgumentParser:
parser = base_parser("input")
subparsers = parser.add_subparsers(required=True, dest="input")
# Stdin subparser
subparser = subparsers.add_parser("stdin")
subparser.add_argument(
"--dimensions",
@@ -125,6 +126,10 @@ def input_parser() -> argparse.ArgumentParser:
help="(default: %(default)s)",
)
add_csv_properties(subparser)
# Inivation subparser
subparser = subparsers.add_parser("inivation")
subparser.add_argument("--buffer-size", type=int, default=1024, help="Array buffer size (default: %(default)s)")
# UDP subparser
subparser = subparsers.add_parser("udp")
subparser.add_argument("address", type=list_filters.parse_udp)
subparser.add_argument(
@@ -274,6 +279,11 @@ def output_parser(
default="none",
help="(default: %(default)s)",
)
subparser.add_argument(
"--enforce-monotonic-timestamps",
action=argparse.BooleanOptionalAction,
help="Enforce non-monitonic timestamps. Defaults to True. Negate this with causion: some file formats do not support non-monotonic timestamps.")
subparser.set_defaults(enforce_monotonic_timestamps=True)
if stream_parent_class in {
faery.FiniteEventsStream,
faery.FiniteRegularEventsStream,
@@ -490,6 +500,8 @@ def set_input(self, arguments: list[str]):
skip_errors=args.csv_skip_errors,
),
)
elif args.input == "inivation":
self.stream = faery.events_stream_from_camera("Inivation")
elif args.input == "udp":
self.stream = faery.events_stream_from_udp(
dimensions=args.dimensions,
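Reviewer note: the new --enforce-monotonic-timestamps flag is registered with argparse.BooleanOptionalAction, so argparse also generates a --no-enforce-monotonic-timestamps switch; that negated form is how the check is disabled from the CLI. A minimal standalone sketch of this behaviour (the parser below is illustrative only, not the actual faery CLI):

import argparse

# BooleanOptionalAction registers both --enforce-monotonic-timestamps and
# its generated negation, --no-enforce-monotonic-timestamps, on the same
# destination; set_defaults makes the check opt-out, as in the hunk above.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--enforce-monotonic-timestamps",
    action=argparse.BooleanOptionalAction,
    help="Enforce monotonic timestamps.",
)
parser.set_defaults(enforce_monotonic_timestamps=True)

assert parser.parse_args([]).enforce_monotonic_timestamps is True
args = parser.parse_args(["--no-enforce-monotonic-timestamps"])
assert args.enforce_monotonic_timestamps is False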
60 changes: 60 additions & 0 deletions python/faery/event_camera_input.py
@@ -0,0 +1,60 @@
import importlib.util
import logging
import typing

import numpy as np

import faery.events_stream as events_stream

def has_event_camera_drivers():
return importlib.util.find_spec("event_camera_drivers") is not None


def has_inivation_camera_drivers():
if has_event_camera_drivers():
import event_camera_drivers as evd
return hasattr(evd, "InivationCamera")
return False


class InivationCameraStream(events_stream.EventsStream):
def __init__(self, buffer_size: int = 1024):
"""Create an events stream from a connected Inivation camera

Args:
buffer_size: The size of the buffer to use for the event stream, defaults to 1024

Returns:
An (infinite) event stream from the camera

Usage:
>>> stream = InivationCameraStream() # Open camera (will fail if no camera is connected)
>>> stream.map(...) # Use the stream as any other (infinite) event stream
"""

super().__init__()

try:
import event_camera_drivers as evd
self.camera = evd.InivationCamera(buffer_size=buffer_size)
except ImportError as e:
logging.error("Inivation camera drivers not available, please install the event_camera_drivers library")
raise e

def __iter__(self) -> typing.Iterator[np.ndarray]:
while self.camera.is_running():
v = next(self.camera)
yield v

def dimensions(self):
return self.camera.resolution()


def events_stream_from_camera(manufacturer: typing.Literal["Inivation", "Prophesee"], buffer_size: int = 1024):
if manufacturer == "Inivation":
return InivationCameraStream(buffer_size)
elif manufacturer == "Prophesee":
raise NotImplementedError("Prophesee camera drivers are not implemented yet")
else:
raise ValueError(f"Unknown camera manufacturer: {manufacturer}")
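For context, a short usage sketch of the new entry point (not part of this diff; assumes the optional event_camera_drivers package is installed and an Inivation camera is connected):

import faery
from faery.event_camera_input import has_inivation_camera_drivers

if has_inivation_camera_drivers():
    # Open the camera and read a single packet; the stream is infinite,
    # so a real pipeline would keep iterating or chain .to_file(...)
    # as in examples/camera_to_aedat4.py.
    stream = faery.events_stream_from_camera("Inivation", buffer_size=1024)
    print(stream.dimensions())  # sensor resolution reported by the driver
    for events in stream:
        print(len(events))  # number of events in this packet
        break
else:
    print("event_camera_drivers is not installed, skipping capture")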

6 changes: 5 additions & 1 deletion python/faery/events_stream.py
@@ -105,6 +105,7 @@ def to_file(
csv_header: bool = True,
file_type: typing.Optional[enums.EventsFileType] = None,
on_progress: typing.Callable[[OutputState], None] = lambda _: None,
enforce_monotonic_timestamps: bool = True,
) -> str:
"""
Writes the stream to an event file (supports .aedat4, .es, .raw, and .dat).
@@ -128,6 +129,8 @@ def to_file(
csv_separator: Separator between CSV fields. Defaults to b",".
csv_header: Whether to generate a CSV header. Defaults to True.
file_type: Override the type determination algorithm. Defaults to None.
enforce_monotonic_timestamps: Whether to enforce that timestamps are monotonically increasing. Note that some formats
(such as AEDAT, ES, and DAT) do not support non-monotonic timestamps. Defaults to True.

Returns:
The original t0 as a timecode if the file type is ES, EVT (.raw) or DAT, and if `zero_t0` is true. 0 as a timecode otherwise.
@@ -154,6 +157,7 @@
file_type=file_type,
use_write_suffix=use_write_suffix,
on_progress=on_progress, # type: ignore
enforce_monotonic_timestamps=enforce_monotonic_timestamps,
)

def to_stdout(
@@ -407,7 +411,7 @@ def to_event_rate(


class RegularEventsStream(
stream.RegularStream[numpy.ndarray],
stream.Stream[numpy.ndarray],
Output[events_stream_state.RegularEventsStreamState],
):
def regularize(
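A hedged sketch of the new to_file parameter (output path is a placeholder): CSV output accepts out-of-order events when the check is disabled, whereas the AEDAT, ES, and DAT encoders assert that it stays enabled (see file_encoder.py below).

import faery

# Sketch only: capture from an Inivation camera and write CSV with the
# monotonicity check disabled; "events.csv" is a placeholder path.
(
    faery.events_stream_from_camera("Inivation")
    .to_file("events.csv", enforce_monotonic_timestamps=False)
)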
1 change: 0 additions & 1 deletion python/faery/events_stream_state.py
@@ -110,7 +110,6 @@ class FiniteRegularEventsStreamState:
Total number of packets in the stream.
"""


class StateManager:
"""
Keeps track of the number of frames processed by the stream and calls `on_progress`.
8 changes: 8 additions & 0 deletions python/faery/file_encoder.py
@@ -34,6 +34,7 @@ def events_to_file(
on_progress: typing.Callable[
[events_stream_state.EventsStreamState], None
] = lambda _: None,
enforce_monotonic_timestamps: bool = True,
) -> str:
"""Writes the stream to an event file (supports .aedat4, .es, .raw, and .dat).

@@ -54,6 +55,8 @@ def events_to_file(
csv_separator: Separator for CSV files, must be a single character. Defaults to b",".
csv_header: Whether to add a header to the CSV file. Defaults to True.
file_type: Override the type determination algorithm. Defaults to None.
enforce_monotonic_timestamps: Whether to enforce that timestamps are monotonically increasing. Note that some formats
(such as AEDAT, ES, and DAT) do not support non-monotonic timestamps. Defaults to True.

Returns:
The original t0 as a timecode if the file type is ES, EVT (.raw), or DAT, and if `zero_t0` is true. 0 as a timecode otherwise.
@@ -91,6 +94,7 @@ def events_to_file(
)
if file_type == "aedat":
assert path is not None
assert enforce_monotonic_timestamps, "AEDAT files do not support non-monotonic timestamps, since timestamps may overflow"
with aedat.Encoder(
path=path if write_path is None else write_path,
description_or_tracks=[
@@ -113,6 +117,7 @@ def events_to_file(
separator=csv_separator[0],
header=csv_header,
dimensions=dimensions,
enforce_monotonic=enforce_monotonic_timestamps,
) as encoder:
state_manager.start()
for events in stream:
@@ -125,6 +130,7 @@ def events_to_file(
t0 = 0
elif file_type == "dat":
assert path is not None
assert enforce_monotonic_timestamps, "DAT files do not support non-monotonic timestamps, since timestamps may overflow"
with dat.Encoder(
path=path if write_path is None else write_path,
version="dat2" if version is None else version, # type: ignore
@@ -158,6 +164,7 @@ def events_to_file(
state_manager.end()
elif file_type == "es":
assert path is not None
assert enforce_monotonic_timestamps, "ES files do not support non-monotonic timestamps"
with event_stream.Encoder(
path=path if write_path is None else write_path,
event_type="dvs",
@@ -184,6 +191,7 @@ def events_to_file(
version="evt3" if version is None else version, # type: ignore
zero_t0=zero_t0,
dimensions=dimensions,
enforce_monotonic=enforce_monotonic_timestamps,
) as encoder:
state_manager.start()
for events in stream:
1 change: 1 addition & 0 deletions python/faery/types/csv.pyi
@@ -40,6 +40,7 @@ class Encoder:
separator: int,
header: bool,
dimensions: tuple[int, int],
enforce_monotonic: bool,
): ...
def __enter__(self) -> Encoder: ...
def __exit__(
1 change: 1 addition & 0 deletions python/faery/types/evt.pyi
@@ -31,6 +31,7 @@ class Encoder:
version: typing.Literal["evt2", "evt2.1", "evt3"],
zero_t0: bool,
dimensions: tuple[int, int],
enforce_monotonic: bool,
): ...
def __enter__(self) -> Encoder: ...
def __exit__(
5 changes: 4 additions & 1 deletion src/csv/encoder.rs
@@ -25,6 +25,7 @@ pub struct Encoder {
dimensions: (u16, u16),
separator: char,
previous_t: u64,
enforce_monotonic: bool,
}

#[derive(thiserror::Error, Debug)]
@@ -48,6 +49,7 @@ impl Encoder {
separator: u8,
header: bool,
dimensions: (u16, u16),
enforce_monotonic: bool,
) -> Result<Self, Error> {
let separator = std::str::from_utf8(&[separator])?
.chars()
@@ -67,14 +69,15 @@
dimensions,
separator,
previous_t: 0,
enforce_monotonic,
})
}

pub fn write(
&mut self,
event: neuromorphic_types::DvsEvent<u64, u16, u16>,
) -> Result<(), utilities::WriteError> {
if event.t < self.previous_t {
if self.enforce_monotonic && event.t < self.previous_t {
return Err(utilities::WriteError::NonMonotonic {
previous_t: self.previous_t,
t: event.t,
2 changes: 2 additions & 0 deletions src/csv/mod.rs
@@ -151,6 +151,7 @@ impl Encoder {
separator: u8,
header: bool,
dimensions: (u16, u16),
enforce_monotonic: bool,
) -> PyResult<Self> {
Python::with_gil(|python| -> PyResult<Self> {
Ok(Encoder {
@@ -165,6 +166,7 @@
separator,
header,
dimensions,
enforce_monotonic,
)?),
})
})