diff --git a/src/abc.py b/src/abc.py
index c2a014d..e97d2e0 100644
--- a/src/abc.py
+++ b/src/abc.py
@@ -18,6 +18,9 @@ def stop_and_join(self):
 
 
 class ISpeaker(object):
+    def __init__(self, *args):
+        pass
+
     def play(self, *args):
         pass
 
diff --git a/src/blackbox.py b/src/blackbox.py
index 464192e..6a19a59 100644
--- a/src/blackbox.py
+++ b/src/blackbox.py
@@ -5,6 +5,9 @@
 class BlackBox(object):
     def __init__(self, image_saver):
         self._image_saver = image_saver
 
+    def log_image(self, image):
+        self._image_saver.save_image(image)
+
     def log(self, image, traffic_lights, objects_by_label, tracker):
         # save image with detection overlay,
diff --git a/src/camera_capturer.py b/src/camera_capturer.py
index f4924ec..4f6aed8 100644
--- a/src/camera_capturer.py
+++ b/src/camera_capturer.py
@@ -12,12 +12,6 @@ def __init__(self, camera, fps, is_recording_query_func, pubsub, inference_config):
         self._camera = camera
         self._dt = 1.0 / fps
         self._inference_resolution = inference_config.inference_resolution
-        inference_width = int(ceil(self._inference_resolution[0] / 32) * 32)
-        inference_scale = self._inference_resolution[0] / self._camera.resolution[0]
-        inference_height = self._camera.resolution[1] * inference_scale
-        inference_height = int(inference_height // 16 * 16)
-
-        self._capture_resolution = (inference_width, inference_height)
         self._is_recording_query_func = is_recording_query_func
         self._thread = threading.Thread(target=self._run, daemon=True)
         self._stream = io.BytesIO()
@@ -28,23 +22,21 @@ def _run(self):
         while True:
             if self._is_recording_query_func():
                 start = time.perf_counter()
-                # 80-150ms
-                self._camera.capture(
-                    self._stream,
-                    format="rgb",
-                    resize=self._capture_resolution,
-                    use_video_port=True,
-                )
-                self._stream.truncate()
-                self._stream.seek(0)
-                img = Image.frombuffer(
-                    "RGB", self._capture_resolution, self._stream.getvalue()
-                )
+                request = self._camera.capture_request()
+                img = request.make_array("lores")
+                img = Image.fromarray(img)
+                request.release()
+                #print("capture: {} ms".format((time.perf_counter() - start)*1000))
+
+                #self._stream.truncate()
+                #self._stream.seek(0)
+                #img = Image.frombuffer(
+                #    "RGB", self._inference_resolution, self._stream.getvalue()
+                #)
                 current_time = time.perf_counter()
                 logging.debug("capture at {}".format(current_time))
-                # print("capture 1: {} ms".format((time.perf_counter() - start)*1000))
                 self._pubsub.publish((img, current_time))
-                logging.debug("exposure_speed:{}".format(self._camera.exposure_speed))
+                #logging.debug("exposure_speed:{}".format(self._camera.exposure_speed))
                 # print("capture 2: {} ms".format((time.perf_counter() - start)*1000))
             time.sleep(self._dt)
 
diff --git a/src/camera_recorder.py b/src/camera_recorder.py
index d8bbc33..888cf50 100644
--- a/src/camera_recorder.py
+++ b/src/camera_recorder.py
@@ -15,35 +15,32 @@ class StopEvent(object):
     def execute(self, camera_recorder):
         camera_recorder._stop_recording()
 
-
 class CameraRecorder(object):
-    def __init__(self, camera, led, recording_folder, daemon=True):
+    def __init__(self, camera, fps, led, recording_folder, daemon=True):
         self._folder = recording_folder
         self._folder.mkdir(parents=True, exist_ok=True)
         self._camera = camera
         self._led = led
+        self._fps = fps
         # self._tape = Tape(self.fps, self._format)
         self._is_recording = False
         self._event_queue = queue.Queue()
         self._encoder = H264Encoder(10000000)
-        filepath = "{}/recording_{}_%03d.mp4".format(
+        filepath = "{}/recording_{}.mp4".format(
             self._folder, time.strftime("%Y%m%d-%H%M%S")
         )
-        ffmpeg_cmd = """-v 16 -framerate {0} -f {1}
-        -i pipe:0 -codec copy
-        -movflags faststart
-        -segment_time 00:01:00 -f segment -reset_timestamps 1
-        -y {2}""".format(
-            self.fps, "h264", filepath)
-        self._tape = FfmpegOutput(self._ffmpeg_cmd)
+        self._tape = FfmpegOutput(filepath)
         if daemon:
             self._thread = threading.Thread(target=self.run, daemon=True)
             self._thread.start()
 
-    @property #fixme
+    @property
     def fps(self):
-        return self._camera.video_configuration.controls.FrameRate
+        return self._fps
+        #metadata = self._camera.capture_metadata()
+        #framerate = 1000000 / metadata["FrameDuration"]
+        #return framerate
 
     def is_recording(self):
         return self._is_recording
 
@@ -64,7 +61,7 @@ def _start_recording(self):
     def _stop_recording(self):
         if self._is_recording:
             self._camera.stop_recording()
-            self._tape.close()
+            #self._tape.close()
             self._is_recording = False
             logging.info("stop recording")
 
@@ -75,6 +72,7 @@ def process_event(self):
 
     def run(self, start_recording=True):
         if start_recording:
+            logging.info("start camera recording")
             self._start_recording()
         while True:
             if self._is_recording:
diff --git a/src/copilot.py b/src/copilot.py
index cddd08d..50c2d7c 100644
--- a/src/copilot.py
+++ b/src/copilot.py
@@ -52,19 +52,19 @@ def __init__(
 
         self._traffic_light_state = TrafficLightStateAdaptor(args.mode)
 
         self._ssd_interpreter = ssd_interpreter
-        self._ssd_interpreter.allocate_tensors()
+        #self._ssd_interpreter.allocate_tensors()
         self._classfication_interpreter = traffic_light_classifier_interpreter
-        self._classfication_interpreter.allocate_tensors()
+        #self._classfication_interpreter.allocate_tensors()
         # self._traffic_light_size = common.input_size(self._classfication_interpreter)
 
         self._speaker = speaker
-        input_shape = self._ssd_interpreter.get_input_details()[0]["shape"]
+        #input_shape = self._ssd_interpreter.get_input_details()[0]["shape"]
 
         tile_w_overlap, tile_h_overlap = self._inference_config.tile_overlap
         tile_size = self._inference_config.tile_size
-        assert tile_size == input_shape[1]
-        self._tile_config = TileConfig(tile_size, tile_w_overlap, tile_h_overlap)
+        #assert tile_size == input_shape[1]
+        #self._tile_config = TileConfig(tile_size, tile_w_overlap, tile_h_overlap)
 
         # self._ssd_labels = read_label_file(self._args.label) if self._args.label else {}
         # self._traffic_light_labels = (
@@ -98,6 +98,8 @@ def run(self):
             prev_cycle_time = current_cycle_time
             logging.debug("recv image from: {}".format(image_time))
             # self.process(image)
+            self._blackbox.log_image(image)
+
             logging.debug(
                 "process time: %.2f ms"
                 % ((time.perf_counter() - current_cycle_time) * 1000)
diff --git a/src/image_saver.py b/src/image_saver.py
index f914252..56efd17 100644
--- a/src/image_saver.py
+++ b/src/image_saver.py
@@ -34,6 +34,10 @@ def save_image_and_traffic_lights(self, image, traffic_lights):
         self._task_queue.put(lambda: self._save_image(image, ti))
         self._task_queue.put(lambda: self._save_traffic_lights(traffic_lights, ti))
 
+    def save_image(self, image):
+        ti = datetime.now().strftime("%Y%m%d-%H%M%S.%f")[:-3]
+        self._task_queue.put(lambda: self._save_image(image, ti))
+
     def _save_traffic_lights(self, traffic_lights, name_prefix):
         for i, t in enumerate(traffic_lights):
             filename = self._rec_detection_folder.joinpath(
diff --git a/src/main.py b/src/main.py
index c4d3f4e..187f665 100644
--- a/src/main.py
+++ b/src/main.py
@@ -7,7 +7,8 @@
 from picamera2 import Picamera2
 
 from src.os_utils import generate_recording_postfix
-from .abc import ILed
+from .abc import ILed as Led
+from .abc import ISpeaker as Speaker
 from .camera_capturer import CameraCapturer
 from .camera_recorder import CameraRecorder
 from .pubsub import PubSub
@@ -16,10 +17,13 @@
 from .copilot import CoPilot
 from .image_saver import AsyncImageSaver
 from .blackbox import BlackBox
-from .speaker import Speaker
+#from .speaker import Speaker
 from .utils import run_periodic
 from .disk_manager import DiskManager
 
+def make_interpreter(model_path):
+    pass
+
 
 def parse_arguments():
     parser = argparse.ArgumentParser()
@@ -111,31 +115,39 @@ def main():
     camera = Picamera2()
 
     main_stream = {"size": camera_info.resolution}
-    lores_stream = {"size": inference_config.inference_resolution}
+    lores_stream = {"size": inference_config.inference_resolution, "format": "RGB888"}
     video_config = camera.create_video_configuration(main_stream, lores_stream, encode="main")
     camera.configure(video_config)
+    fps = 20.0
+    camera.set_controls({"FrameRate": fps, "ExposureTime": 20000})
+
+    #metadata = camera.capture_metadata()
+
+    #print("here 4")
+    #framerate = 1000000 / metadata["FrameDuration"]
+    #print("here 5")
+
+    #logging.debug("configure camera done, fps: {}".format(framerate))
 
-    camera.set_controls({"FrameRate": 20})
-    metadata = camera.capture_metadata()
-    logging.info("metadata")
-    logging.info(metadata)
     #camera.framerate = 20
     #camera.exposure_mode = "sports"
 
     led_pin = 10
-    led = ILed()
-    camera_recorder = CameraRecorder(camera, led, args.blackbox_path)
+    led = Led()
+    camera_recorder = CameraRecorder(camera, fps, led, args.blackbox_path)
     camera_capturer = CameraCapturer(
         camera, 5, camera_recorder.is_recording, pubsub, inference_config
     )
 
     if args.cpu:
-        from tflite_runtime.interpreter import Interpreter as make_interpreter
+        #from tflite_runtime.interpreter import Interpreter as make_interpreter
+        pass
     else:
-        from pycoral.utils.edgetpu import make_interpreter
+        #from pycoral.utils.edgetpu import make_interpreter
+        pass
 
     try:
         copilot = CoPilot(
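
Note for reviewers: this patch moves capture and recording from the legacy picamera API to picamera2. The sketch below is only a minimal illustration that ties together the picamera2 calls used across the diff (a main/lores stream pair, set_controls, H264Encoder plus FfmpegOutput, capture_request/make_array). The stream sizes and output path are illustrative stand-ins for camera_info.resolution, inference_config.inference_resolution, and args.blackbox_path; the bitrate, frame rate, and exposure values mirror the ones hard-coded in the diff. One caveat: an RGB888 lores stream requires a recent picamera2/platform (historically the lores stream was YUV-only).

    import time

    from PIL import Image
    from picamera2 import Picamera2
    from picamera2.encoders import H264Encoder
    from picamera2.outputs import FfmpegOutput

    camera = Picamera2()

    # Two streams from one sensor: the full-size "main" stream feeds the H.264
    # encoder, the small RGB "lores" stream feeds inference without a second capture.
    main_stream = {"size": (1920, 1080)}  # stand-in for camera_info.resolution
    lores_stream = {"size": (320, 320), "format": "RGB888"}  # stand-in for inference_resolution
    camera.configure(camera.create_video_configuration(main_stream, lores_stream, encode="main"))
    camera.set_controls({"FrameRate": 20.0, "ExposureTime": 20000})

    # Recording: picamera2 drives the encoder and hands the stream to ffmpeg for
    # muxing, replacing the hand-built "-f segment" ffmpeg pipe command it removes.
    encoder = H264Encoder(10000000)
    tape = FfmpegOutput("recording_{}.mp4".format(time.strftime("%Y%m%d-%H%M%S")))
    camera.start_recording(encoder, tape)

    # Capture for inference: take one request, copy the lores plane out as a
    # numpy array, and release the request so its buffers return to the camera.
    request = camera.capture_request()
    img = Image.fromarray(request.make_array("lores"))
    request.release()

    camera.stop_recording()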
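The fps property of CameraRecorder now returns the value passed in from main.py, and the metadata-based query is left commented out. For reference, that query corresponds to the pattern below; capture_metadata() blocks until the next frame is delivered, and "FrameDuration" is reported in microseconds.

    # Only call this while the camera is running and a one-frame stall is acceptable.
    metadata = camera.capture_metadata()
    fps = 1000000 / metadata["FrameDuration"]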