Commit

Add files via upload
DaHyeonnn authored Jul 23, 2022
1 parent cf70ce4 commit 11e2801
Showing 16 changed files with 2,839 additions and 0 deletions.
674 changes: 674 additions & 0 deletions scripts/LICENSE

Binary file added scripts/car_people.pt
167 changes: 167 additions & 0 deletions scripts/detect_with_Camera.py
@@ -0,0 +1,167 @@
#!/home/autonav-linux/catkin_ws/src/yolov5_ROS/scripts/yolov5/bin/python3
# coco dataset
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run inference on images, videos, directories, streams, etc.
Usage - sources:
$ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam
img.jpg # image
vid.mp4 # video
path/ # directory
path/*.jpg # glob
'https://youtu.be/Zgi9g1ksQHc' # YouTube
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
Usage - formats:
$ python path/to/detect.py --weights yolov5s.pt # PyTorch
yolov5s.torchscript # TorchScript
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s.xml # OpenVINO
yolov5s.engine # TensorRT
yolov5s.mlmodel # CoreML (MacOS-only)
yolov5s_saved_model # TensorFlow SavedModel
yolov5s.pb # TensorFlow GraphDef
yolov5s.tflite # TensorFlow Lite
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
"""

import os
import sys
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn

FILE = Path(__file__).resolve()
SIZE_REDUCE = True  # currently unused

ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.datasets import LoadStreams
from utils.general import LOGGER, check_img_size, non_max_suppression, scale_coords
from utils.plots import Annotator, colors
from utils.torch_utils import select_device, time_sync

# IMGSZ = (640, 480)
IMGSZ = (1920, 1080)
FPS = 13  # 0 -> as fast as possible (default)
pass_list = ["person", "car"]  # only these classes are annotated and logged
@torch.no_grad()
def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
        # weights=ROOT / 'RTX_3090_0516.pt',  # alternative weights
        # data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        conf_thres=0.65,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        ):

    # Load model
    device = select_device(device)
    # model = DetectMultiBackend(weights, device=device, dnn=False, data=data)
    model = DetectMultiBackend(weights, device=device, dnn=False)
    stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
    imgsz = check_img_size(IMGSZ, s=stride)  # check image size

    # Half
    half = False
    half &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16 supported on limited backends with CUDA
    if pt or jit:
        model.model.half() if half else model.model.float()
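    # Since half is hard-coded to False above, inference always runs in FP32
    # regardless of backend or device.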

    # Dataloader
    cudnn.benchmark = True  # set True to speed up constant image size inference
    dataset = LoadStreams(img_size=imgsz, stride=stride, auto=pt, fps=FPS)
    bs = 1  # batch size
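    # Assumption: this repo ships a modified LoadStreams — the stock YOLOv5
    # loader takes no fps argument and has no return_info() method, so both
    # appear to be local extensions in utils/datasets.py.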

    # Run inference
    model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half)  # warmup
    dt, seen = [0.0, 0.0, 0.0], 0  # [pre-process, inference, NMS] times and frame count

    while True:
        s_time = time.time()
        im, im0, vid_cap, s = dataset.return_info()
        t1 = time_sync()
        im = torch.from_numpy(im).to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        pred = model(im, augment=False, visualize=False)
        t3 = time_sync()
        dt[1] += t3 - t2

        # NMS
        pred = non_max_suppression(pred, conf_thres, iou_thres, None, None, max_det=max_det)
        dt[2] += time_sync() - t3
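        # The two None arguments are classes and agnostic_nms: no class filter
        # is applied at NMS time; person/car filtering happens below via pass_list.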

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            seen += 1
            s += f'{i}: '
            s += '%gx%g ' % im.shape[2:]  # print string
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))

            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
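                # At this point s reads like "0: 1920x1080 2 persons, 1 car, ";
                # it is built for logging but never printed in this variant (the
                # LOGGER.info call further down is commented out).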

save_txt = ""
# Write results
for *xyxy, conf, cls in reversed(det):
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
# print(str(label).split(" ")[0], "tv")

if str(label).split(" ")[0] in pass_list:
pass
else:
continue

annotator.box_label(xyxy, label, color=colors(c, True))
save_list = [str(i.tolist()) for i in xyxy]
pre_txt = ", ".join(save_list)
save_txt += f"{label}-{pre_txt}/"
# print(save_txt)

            # Stream results
            im0 = annotator.result()
            im0 = cv2.resize(im0, (1280, 720))
            cv2.imshow("res", im0)
            # cv2.imwrite("res1.png", im0)
            cv2.waitKey(1)  # 1 millisecond

        # Print time (inference-only)
        # LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
        print(1 / (time.time() - s_time), time.time() - s_time)  # effective FPS and per-frame latency

    # Print results (never reached — the while loop above runs forever)
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)

if __name__ == "__main__":
run()
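
A minimal usage sketch (an assumption, not part of the commit): since the same commit ships scripts/car_people.pt, run() can presumably be pointed at those weights instead of the default yolov5s.pt. Note that calling run() starts the infinite camera loop above.

# Hypothetical invocation from another script in scripts/; car_people.pt is
# the checkpoint added in this commit, assumed to be a YOLOv5 .pt model.
from detect_with_Camera import ROOT, run

run(weights=ROOT / 'car_people.pt', conf_thres=0.5)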