This repository has been archived by the owner on Jan 10, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 104
/
test_davis_videos.py
53 lines (42 loc) · 1.61 KB
/
test_davis_videos.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from options.train_options import TrainOptions
from loaders import aligned_data_loader
from models import pix2pix_model
# ---------------------------------------------------------------------------
# Evaluation script: runs a pretrained Pix2Pix depth model over the DAVIS
# video test set and writes visualized predictions to disk.
# ---------------------------------------------------------------------------

BATCH_SIZE = 1  # one stacked frame per forward pass

# Parse options before any CUDA work: parse() sets CUDA_VISIBLE_DEVICES,
# which must happen before torch initializes the CUDA context.
opt = TrainOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch

video_list = 'test_data/test_davis_video_list.txt'
eval_num_threads = 2

video_data_loader = aligned_data_loader.DAVISDataLoader(video_list, BATCH_SIZE)
video_dataset = video_data_loader.load_data()
print('========================= Video dataset #images = %d =========' %
      len(video_data_loader))

model = pix2pix_model.Pix2PixModel(opt)

# cuDNN autotuning is safe here: every DAVIS batch has the same spatial
# size, so benchmark mode picks the fastest kernels once and reuses them.
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

# NOTE(review): kept for compatibility, but neither variable is used below.
best_epoch = 0
global_step = 0

print(
    '================================= BEGIN VALIDATION ====================================='
)
print('TESTING ON VIDEO')

model.switch_to_eval()
save_path = 'test_data/viz_predictions/'
print('save_path %s' % save_path)

# Inference only: disable autograd so activation graphs are not retained,
# which reduces GPU memory use and speeds up each per-frame forward pass.
with torch.no_grad():
    for i, data in enumerate(video_dataset):
        print(i)
        stacked_img = data[0]  # batched input frames
        targets = data[1]      # per-frame metadata/targets used for saving
        model.run_and_save_DAVIS(stacked_img, targets, save_path)