Fix camera recording and capture
xeonqq committed Mar 17, 2024
1 parent baf0f48 commit 118fdfe
Showing 7 changed files with 63 additions and 50 deletions.
3 changes: 3 additions & 0 deletions src/abc.py
@@ -18,6 +18,9 @@ def stop_and_join(self):


class ISpeaker(object):
def __init__(self, *args):
pass

def play(self, *args):
pass

3 changes: 3 additions & 0 deletions src/blackbox.py
@@ -5,6 +5,9 @@ class BlackBox(object):
def __init__(self, image_saver):
self._image_saver = image_saver

def log_image(self, image):
self._image_saver.save_image(image)

def log(self, image, traffic_lights, objects_by_label, tracker):

# save image with detection overlay,
32 changes: 12 additions & 20 deletions src/camera_capturer.py
@@ -12,12 +12,6 @@ def __init__(self, camera, fps, is_recording_query_func, pubsub, inference_confi
self._camera = camera
self._dt = 1.0 / fps
self._inference_resolution = inference_config.inference_resolution
inference_width = int(ceil(self._inference_resolution[0] / 32) * 32)
inference_scale = self._inference_resolution[0] / self._camera.resolution[0]
inference_height = self._camera.resolution[1] * inference_scale
inference_height = int(inference_height // 16 * 16)

self._capture_resolution = (inference_width, inference_height)
self._is_recording_query_func = is_recording_query_func
self._thread = threading.Thread(target=self._run, daemon=True)
self._stream = io.BytesIO()
@@ -28,23 +22,21 @@ def _run(self):
if self._is_recording_query_func():

start = time.perf_counter()
# 80-150ms
self._camera.capture(
self._stream,
format="rgb",
resize=self._capture_resolution,
use_video_port=True,
)
self._stream.truncate()
self._stream.seek(0)
img = Image.frombuffer(
"RGB", self._capture_resolution, self._stream.getvalue()
)
request = self._camera.capture_request()
img = request.make_array("lores")
img = Image.fromarray(img)
request.release()
#print("capture: {} ms".format((time.perf_counter() - start)*1000))

#self._stream.truncate()
#self._stream.seek(0)
#img = Image.frombuffer(
# "RGB", self._inference_resolution, self._stream.getvalue()
#)

current_time = time.perf_counter()
logging.debug("capture at {}".format(current_time))
# print("capture 1: {} ms".format((time.perf_counter() - start)*1000))
self._pubsub.publish((img, current_time))
logging.debug("exposure_speed:{}".format(self._camera.exposure_speed))
#logging.debug("exposure_speed:{}".format(self._camera.exposure_speed))
# print("capture 2: {} ms".format((time.perf_counter() - start)*1000))
time.sleep(self._dt)
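
The change above replaces the legacy picamera capture-into-BytesIO path with picamera2's request API: grab a request, read the lores stream as an array, wrap it in a PIL Image, then release the request. A minimal standalone sketch of that pattern follows; the stream sizes and the RGB888 lores format are assumptions copied from this commit, and on platforms where the lores stream is YUV-only the Image.fromarray step would need a conversion.

    import time
    from PIL import Image
    from picamera2 import Picamera2

    camera = Picamera2()
    # Full-resolution main stream for encoding plus a smaller lores stream
    # that the capture loop reads for inference (sizes are illustrative).
    config = camera.create_video_configuration(
        main={"size": (1640, 1232)},
        lores={"size": (320, 240), "format": "RGB888"},
    )
    camera.configure(config)
    camera.start()

    start = time.perf_counter()
    # Capture one completed request, copy the lores buffer out as a numpy
    # array, and release the request so its buffers return to the pipeline.
    request = camera.capture_request()
    array = request.make_array("lores")
    request.release()

    img = Image.fromarray(array)
    print("capture: {:.1f} ms, size {}".format(
        (time.perf_counter() - start) * 1000, img.size))
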
24 changes: 11 additions & 13 deletions src/camera_recorder.py
@@ -15,35 +15,32 @@ class StopEvent(object):
def execute(self, camera_recorder):
camera_recorder._stop_recording()


class CameraRecorder(object):
def __init__(self, camera, led, recording_folder, daemon=True):
def __init__(self, camera, fps, led, recording_folder, daemon=True):
self._folder = recording_folder
self._folder.mkdir(parents=True, exist_ok=True)
self._camera = camera
self._led = led
self._fps = fps
# self._tape = Tape(self.fps, self._format)
self._is_recording = False
self._event_queue = queue.Queue()
self._encoder = H264Encoder(10000000)

filepath = "{}/recording_{}_%03d.mp4".format(
filepath = "{}/recording_{}.mp4".format(
self._folder, time.strftime("%Y%m%d-%H%M%S")
)
ffmpeg_cmd = """-v 16 -framerate {0} -f {1}
-i pipe:0 -codec copy
-movflags faststart
-segment_time 00:01:00 -f segment -reset_timestamps 1
-y {2}""".format(
self.fps, "h264", filepath)
self._tape = FfmpegOutput(self._ffmpeg_cmd)
self._tape = FfmpegOutput(filepath)
if daemon:
self._thread = threading.Thread(target=self.run, daemon=True)
self._thread.start()

@property #fixme
@property
def fps(self):
return self._camera.video_configuration.controls.FrameRate
return self._fps
#metadata = self._camera.capture_metadata()
#framerate = 1000000 / metadata["FrameDuration"]
#return framerate

def is_recording(self):
return self._is_recording
@@ -64,7 +61,7 @@ def _start_recording(self):
def _stop_recording(self):
if self._is_recording:
self._camera.stop_recording()
self._tape.close()
#self._tape.close()
self._is_recording = False
logging.info("stop recording")

@@ -75,6 +72,7 @@ def process_event(self):

def run(self, start_recording=True):
if start_recording:
logging.info("start camera recording")
self._start_recording()
while True:
if self._is_recording:
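
With picamera2, the hand-built ffmpeg pipe command is no longer needed: an H264Encoder produces the video stream and an FfmpegOutput muxes it into the MP4 file. A rough sketch of that start/stop flow, using the same 10 Mbit/s bitrate as the commit; the file name and recording duration are placeholders.

    import time
    from picamera2 import Picamera2
    from picamera2.encoders import H264Encoder
    from picamera2.outputs import FfmpegOutput

    camera = Picamera2()
    camera.configure(camera.create_video_configuration())

    # 10 Mbit/s H.264 stream, written into an MP4 container by ffmpeg.
    encoder = H264Encoder(10000000)
    tape = FfmpegOutput("recording_{}.mp4".format(
        time.strftime("%Y%m%d-%H%M%S")))

    camera.start_recording(encoder, tape)  # starts the camera if needed
    time.sleep(5)                          # record for a few seconds
    camera.stop_recording()                # stops the camera and closes the output
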
12 changes: 7 additions & 5 deletions src/copilot.py
@@ -52,19 +52,19 @@ def __init__(
self._traffic_light_state = TrafficLightStateAdaptor(args.mode)

self._ssd_interpreter = ssd_interpreter
self._ssd_interpreter.allocate_tensors()
#self._ssd_interpreter.allocate_tensors()

self._classfication_interpreter = traffic_light_classifier_interpreter
self._classfication_interpreter.allocate_tensors()
#self._classfication_interpreter.allocate_tensors()
# self._traffic_light_size = common.input_size(self._classfication_interpreter)

self._speaker = speaker

input_shape = self._ssd_interpreter.get_input_details()[0]["shape"]
#input_shape = self._ssd_interpreter.get_input_details()[0]["shape"]
tile_w_overlap, tile_h_overlap = self._inference_config.tile_overlap
tile_size = self._inference_config.tile_size
assert tile_size == input_shape[1]
self._tile_config = TileConfig(tile_size, tile_w_overlap, tile_h_overlap)
#assert tile_size == input_shape[1]
#self._tile_config = TileConfig(tile_size, tile_w_overlap, tile_h_overlap)

# self._ssd_labels = read_label_file(self._args.label) if self._args.label else {}
# self._traffic_light_labels = (
@@ -98,6 +98,8 @@ def run(self):
prev_cycle_time = current_cycle_time
logging.debug("recv image from: {}".format(image_time))
# self.process(image)
self._blackbox.log_image(image)

logging.debug(
"process time: %.2f ms"
% ((time.perf_counter() - current_cycle_time) * 1000)
4 changes: 4 additions & 0 deletions src/image_saver.py
@@ -34,6 +34,10 @@ def save_image_and_traffic_lights(self, image, traffic_lights):
self._task_queue.put(lambda: self._save_image(image, ti))
self._task_queue.put(lambda: self._save_traffic_lights(traffic_lights, ti))

def save_image(self, image):
ti = datetime.now().strftime("%Y%m%d-%H%M%S.%f")[:-3]
self._task_queue.put(lambda: self._save_image(image, ti))

def _save_traffic_lights(self, traffic_lights, name_prefix):
for i, t in enumerate(traffic_lights):
filename = self._rec_detection_folder.joinpath(
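
Taken together with the BlackBox and CoPilot changes above, every frame received from the capturer is now queued for saving even though detection is disabled: CoPilot.run calls BlackBox.log_image, which hands the frame to AsyncImageSaver.save_image, which timestamps it and puts a save task on the worker queue. A condensed sketch of that path; the stand-in saver's constructor and _save_image body are simplified assumptions, not the real class.

    from datetime import datetime
    from queue import Queue


    class StubImageSaver:
        # Stand-in for AsyncImageSaver: one task per frame on a queue
        # that a worker thread drains in the real class.
        def __init__(self):
            self._task_queue = Queue()

        def save_image(self, image):
            ti = datetime.now().strftime("%Y%m%d-%H%M%S.%f")[:-3]
            self._task_queue.put(lambda: self._save_image(image, ti))

        def _save_image(self, image, name_prefix):
            print("would save frame as {}.jpg".format(name_prefix))


    class BlackBox:
        def __init__(self, image_saver):
            self._image_saver = image_saver

        def log_image(self, image):
            self._image_saver.save_image(image)


    # Inside CoPilot.run the equivalent call is self._blackbox.log_image(image).
    BlackBox(StubImageSaver()).log_image(object())
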
35 changes: 23 additions & 12 deletions src/main.py
@@ -7,7 +7,8 @@
from picamera2 import Picamera2

from src.os_utils import generate_recording_postfix
from .abc import ILed
from .abc import ILed as Led
from .abc import ISpeaker as Speaker
from .camera_capturer import CameraCapturer
from .camera_recorder import CameraRecorder
from .pubsub import PubSub
@@ -16,10 +17,13 @@
from .copilot import CoPilot
from .image_saver import AsyncImageSaver
from .blackbox import BlackBox
from .speaker import Speaker
#from .speaker import Speaker
from .utils import run_periodic
from .disk_manager import DiskManager

def make_interpreter(model_path):
pass


def parse_arguments():
parser = argparse.ArgumentParser()
@@ -111,31 +115,38 @@ def main():

camera = Picamera2()


main_stream = {"size": camera_info.resolution}
lores_stream = {"size": inference_config.inference_resolution}
lores_stream = {"size": inference_config.inference_resolution, "format": "RGB888"}
video_config = camera.create_video_configuration(main_stream, lores_stream, encode="main")
camera.configure(video_config)
fps = 20.0
camera.set_controls({"FrameRate": fps, "ExposureTime": 20000})

#metadata = camera.capture_metadata()

#print("here 4")
#framerate = 1000000 / metadata["FrameDuration"]
#print("here 5")

#logging.debug("configure camera done, fps: {}".format(framerate))

camera.set_controls({"FrameRate": 20})
metadata = camera.capture_metadata()
logging.info("metadata")
logging.info(metadata)

#camera.framerate = 20
#camera.exposure_mode = "sports"

led_pin = 10
led = ILed()
camera_recorder = CameraRecorder(camera, led, args.blackbox_path)
led = Led()
camera_recorder = CameraRecorder(camera, fps, led, args.blackbox_path)
camera_capturer = CameraCapturer(
camera, 5, camera_recorder.is_recording, pubsub, inference_config
)

if args.cpu:
from tflite_runtime.interpreter import Interpreter as make_interpreter
#from tflite_runtime.interpreter import Interpreter as make_interpreter
pass
else:
from pycoral.utils.edgetpu import make_interpreter
#from pycoral.utils.edgetpu import make_interpreter
pass

try:
copilot = CoPilot(
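
main.py now drives the new picamera2 pipeline directly: a main stream for encoding plus an RGB888 lores stream for inference, a fixed frame rate and exposure via set_controls, and a metadata read for logging. A short sketch of the controls/metadata round trip; the control values are copied from this commit, and deriving fps from FrameDuration mirrors the commented-out code above.

    from picamera2 import Picamera2

    camera = Picamera2()
    camera.configure(camera.create_video_configuration())
    camera.start()

    # Lock the frame rate and exposure instead of leaving them on auto.
    camera.set_controls({"FrameRate": 20.0, "ExposureTime": 20000})

    # Metadata of the next completed frame; FrameDuration is in
    # microseconds, so the effective rate is 1e6 / FrameDuration.
    metadata = camera.capture_metadata()
    print("effective fps:", 1_000_000 / metadata["FrameDuration"])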
