Modify box demo #108

Open · wants to merge 3 commits into base: main
2 changes: 1 addition & 1 deletion .gitignore
@@ -2,7 +2,7 @@ third_party/detectron2
./models
configs-experimental
experiments
-# output dir
+output dir
index.html
data/*
slurm/
Binary file added datasets/metadata/lvis-21k_clip_a+cname.npy
Binary file not shown.
20 changes: 4 additions & 16 deletions demo.py
@@ -139,6 +139,9 @@ def test_opencv_video_format(codec, file_ext):

demo = VisualizationDemo(cfg, args)

+    if args.output:
+        if not os.path.exists(args.output):
+            os.makedirs(args.output)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
@@ -147,23 +150,8 @@ def test_opencv_video_format(codec, file_ext):
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
-            logger.info(
-                "{}: {} in {:.2f}s".format(
-                    path,
-                    "detected {} instances".format(len(predictions["instances"]))
-                    if "instances" in predictions
-                    else "finished",
-                    time.time() - start_time,
-                )
-            )

if args.output:
-                if os.path.isdir(args.output):
-                    assert os.path.isdir(args.output), args.output
-                    out_filename = os.path.join(args.output, os.path.basename(path))
-                else:
-                    assert len(args.input) == 1, "Please specify a directory with args.output"
-                    out_filename = args.output
+                out_filename = os.path.join(args.output, os.path.basename(path))
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
249 changes: 249 additions & 0 deletions demo_box.py
@@ -0,0 +1,249 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
import sys
import mss



from pascal_voc_writer import Writer

from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger

# os.chdir('/home/zeyu/Detic/')
sys.path.insert(0, 'third_party/CenterNet2/')
# sys.path.append(os.path.dirname('/home/zeyu/Autodet2/Detic/third_party/CenterNet2/'))

from centernet.config import add_centernet_config
from detic.config import add_detic_config

from detic.predictor import VisualizationDemo

# Fake a video capture object OpenCV style - half width, half height of first screen using MSS
class ScreenGrab:
def __init__(self):
self.sct = mss.mss()
m0 = self.sct.monitors[0]
        # mss expects integer pixel dimensions, so use integer division for the half-size region
        self.monitor = {'top': 0, 'left': 0, 'width': m0['width'] // 2, 'height': m0['height'] // 2}

def read(self):
img = np.array(self.sct.grab(self.monitor))
nf = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
return (True, nf)

def isOpened(self):
return True
def release(self):
return True


# constants
WINDOW_NAME = "Detic"

def setup_cfg(args):
cfg = get_cfg()
if args.cpu:
cfg.MODEL.DEVICE="cpu"
add_centernet_config(cfg)
add_detic_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand' # load later
if not args.pred_all_class:
cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True
cfg.freeze()
return cfg


def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", help="Take inputs from webcam.")
parser.add_argument("--cpu", action='store_true', help="Use CPU only.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--vocabulary",
default="lvis",
choices=['lvis', 'openimages', 'objects365', 'coco', 'custom'],
help="",
)
parser.add_argument(
"--custom_vocabulary",
default="",
help="",
)
parser.add_argument("--pred_all_class", action='store_true')
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser


def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False


if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))

cfg = setup_cfg(args)

demo = VisualizationDemo(cfg, args)
if args.output:
if not os.path.exists(args.output):
os.makedirs(args.output)

if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)

boxes = predictions['instances'].get_fields()['pred_boxes'].tensor.cpu().numpy()
classes = predictions['instances'].get_fields()['pred_classes'].cpu().numpy()
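            # predictions["instances"] is a detectron2 Instances object: pred_boxes holds
            # boxes in XYXY pixel coordinates and pred_classes holds contiguous category
            # indices, which map to label names via demo.metadata.thing_classes below.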
# ind = np.concatenate((np.where(classes == 134)+np.where(classes == 383)))
# boxes = boxes[ind]
            # pascal_voc_writer's Writer takes (path, width, height); img.shape is (H, W, C)
            writer = Writer(path, img.shape[1], img.shape[0])
for i,box in enumerate(boxes):
# print(demo.metadata.thing_classes[classes[i]])
                # pred_boxes are in XYXY order; cast to int since Pascal VOC uses integer pixel coordinates
                writer.addObject(demo.metadata.thing_classes[classes[i]],
                                 int(box[0]), int(box[1]), int(box[2]), int(box[3]))
#folder_name = path.split('.')[0].split("/")[-2]
#writer.save(path.split('.')[0].replace(folder_name, folder_name+"_xml")+".xml")
# print(writer)
# writer.save(path.replace('.jpg', '.xml'))


# logger.info(
# "{}: {} in {:.2f}s".format(
# path,
# "detected {} instances".format(len(predictions["instances"]))
# if "instances" in predictions
# else "finished",
# time.time() - start_time,
# )
# )

            if args.output:
                out_filename = os.path.join(args.output, os.path.basename(path))
                # visualized_output.save(out_filename)
                # strip the image extension generically so non-.jpg inputs also produce .xml files
                writer.save(os.path.splitext(out_filename)[0] + '.xml')
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
if args.webcam == "screen":
cam = ScreenGrab()
else:
cam = cv2.VideoCapture(int(args.webcam))
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
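For reference, the annotations that demo_box.py saves are standard Pascal VOC XML files produced by pascal_voc_writer. Below is a minimal sketch, not part of this PR, of how such a file could be read back with only the Python standard library; the path output/example.xml and the invocation in the comment are hypothetical placeholders for files written into the --output directory.

import xml.etree.ElementTree as ET

def load_voc_boxes(xml_path):
    """Return (label, xmin, ymin, xmax, ymax) tuples from a Pascal VOC XML file."""
    root = ET.parse(xml_path).getroot()
    boxes = []
    for obj in root.findall("object"):
        bb = obj.find("bndbox")
        boxes.append((
            obj.findtext("name"),
            float(bb.findtext("xmin")),
            float(bb.findtext("ymin")),
            float(bb.findtext("xmax")),
            float(bb.findtext("ymax")),
        ))
    return boxes

if __name__ == "__main__":
    # Hypothetical example: inspect one annotation produced by a run such as
    # `python demo_box.py --input images/*.jpg --output output/` (config and weights omitted).
    for label, x0, y0, x1, y1 in load_voc_boxes("output/example.xml"):
        print(f"{label}: ({x0:.0f}, {y0:.0f}) -> ({x1:.0f}, {y1:.0f})")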
Binary file added embedding_matrix_imagenet.pt
Binary file not shown.
Binary file added embedding_matrix_lvis.pt
Binary file not shown.
1 change: 1 addition & 0 deletions imagenet_class_info.json

Large diffs are not rendered by default.

Binary file added lvis_cat_id_to_imgdir.npy
Binary file not shown.