-
Notifications
You must be signed in to change notification settings - Fork 2
/
comp.py
1172 lines (901 loc) · 50.5 KB
/
comp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
r"""
I do not provide support for this unless it's an actual error in the code and not related to your setup.
This script was originally written for VS R53 and Python 3.9, and has been tested on VS R65 and Python 3.11.
You'll need:
- VapourSynth (https://github.com/vapoursynth/vapoursynth/releases)
- "pip install anitopy pyperclip requests requests_toolbelt natsort vstools rich colorama" in terminal (without quotes)
- "vsrepo install fpng lsmas sub" in terminal (without quotes) or the following installed to your usual VapourSynth plugins folder:
- https://github.com/Mikewando/vsfpng
- https://github.com/AkarinVS/L-SMASH-Works/releases/latest
- https://github.com/vapoursynth/subtext/releases/latest
- Note: plugins folder is typically found in "%AppData%\Roaming\VapourSynth\plugins64" or "C:\Program Files\VapourSynth\plugins"
- Optional: If using FFmpeg, it must be installed and in PATH.
How to use:
- Drop comp.py into a folder with the video files you want to compare.
- (Recommended) Rename your files to have the typical [Group] Show - Ep.mkv naming, since the script will try to parse the group and show name.
e.g. [JPBD] Youjo Senki - 01.m2ts; [Vodes] Youjo Senki - 01.mkv.
- Change variables below.
- Run comp.py.
"""
# Ram limit (in MB)
ram_limit = 4000
# Number of dark, bright, and high motion frames to algorithmically select.
frame_count_dark = 20
frame_count_bright = 10
frame_count_motion = 15
# Choose your own frames to export. Does not decrease the number of algorithmically selected frames.
user_frames = []
# Number of frames to choose randomly. Completely separate from frame_count_bright, frame_count_dark, and save_frames. Will change every time you run the script.
random_frames = 15
# Save the brightness data in a text file so it doesn't have to be reanalysed next time the script is run. Frames will be reanalysed if show/movie name or episode numbers change.
# Does not save user_frames or random_frames.
save_frames = True
# Print frame info on screenshots.
frame_info = True
# Upscale videos to make the clips match the highest found res.
upscale = True
# Scale all videos to one vertical resolution. Set to 0 to disable, otherwise input the desired vertical res.
single_res = 0
# Use FFmpeg as the image renderer. If false, fpng is used instead
ffmpeg = False
# Compression level. For FFmpeg, range is 0-100. For fpng, 0 is fast, 1 is slow, 2 is uncompressed.
compression = 1
# Automatically upload to slow.pics.
slowpics = True
# Flags to toggle for slowpics settings.
hentai_flag = False
public_flag = True
# TMDB ID of show or movie being comped. Should be in the format "TV_XXXXXX" or "MOVIE_XXXXXX".
tmdbID = ""
# Remove the comparison after this many days. Set to 0 to disable.
remove_after = 0
# Output slow.pics link to discord webhook. Disabled if empty.
webhook_url = r""
# Automatically open slow.pics url in default browser
browser_open = True
# Create a URL shortcut for each comparison uploaded.
url_shortcut = True
# Automatically delete the screenshot directory after uploading to slow.pics.
delete_screen_dir = True
"""
Used to trim clips, or add blank frames to the beginning of a clip.
Clips are taken in alphabetical order of the filenames.
First input can be the filename, group name, or index of the file. Second input must be an integer.
Example:
trim_dict = {0: 1000, "Vodes": 1046, 3:-50}
trim_dict_end = {"Youjo Senki - 01.mkv": 9251, 4: -12}
First clip will start at frame 1000.
Clip with group name "Vodes" will start at frame 1046.
Clip with filename "Youjo Senki - 01.mkv" will end at frame 9251.
Fourth clip will have 50 blank frames appended to its start.
Fifth clip will end 12 frames early.
Note:
If multiple files have the same group name, the trim will be applied to all of them.
"""
trim_dict = {}
trim_dict_end = {}
"""
Actively adjusts a clip's fps to a target. Useful for sources which incorrectly convert 23.976fps to 24fps.
First input can be the filename, group name, or index of the file.
Second input must be a fraction split into a list. Numerator comes first, denominator comes second.
Second input can also be the string "set". This will make all other files, if unspecified fps, use the set file's fps.
Example:
change_fps = {0: [24, 1], 1: [24000, 1001]}
First clip will have its fps adjusted to 24
Second clip will have its fps adjusted to 23.976
Example 2:
change_fps = {0: [24, 1], "MTBB": "set"}
First clip will have its fps adjusted to 24
Every other clip will have its fps adjusted to match MTBB's
Note:
If multiple files have the same group name, the specified fps will be applied to all of them.
"""
change_fps = {}
"""
Specify which clip will be analyzed for frame selection algorithm.
Input can be the filename, group name, or index of the file.
By default will select the file which can be accessed the fastest.
"""
analyze_clip = ""
##### Advanced Settings #####
# Random seed to use in frame selection algorithm. May change selected frames. Recommended to leave as default
random_seed = 20202020
# Filename of the text file in which the brightness data will be stored. Recommended to leave as default.
frame_filename = "generated.compframes"
# Directory in which the screenshots will be kept
screen_dirname = "screens"
# Minimum time between dark, light, and random frames, in seconds. Motion frames use a quarter of this value
screen_separation = 6
# Number of frames in each direction over which the motion data will be averaged out. So a radius of 4 would take the average of 9 frames, the frame in the middle, and 4 in each direction.
# Higher value will make it less likely scene changes get picked up as motion, but may lead to less precise results.
motion_diff_radius = 4
### Not recommended to change stuff below
import os, sys, time, textwrap, re, uuid, random, pathlib, requests, vstools, webbrowser, colorama, shutil, fractions, subprocess
from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn
from natsort import os_sorted
import anitopy as ani
import pyperclip as pc
import vapoursynth as vs
from requests import Session
from functools import partial
from requests_toolbelt import MultipartEncoder
from typing import Any, Dict, List, Optional, BinaryIO, Union, Callable, TypeVar, Sequence, cast
# Callback signature used by render helpers: (frame number, rendered frame) -> None.
RenderCallback = Callable[[int, vs.VideoFrame], None]
# Union of the value types a VapourSynth frame property can hold.
VideoProp = Union[int, Sequence[int],float, Sequence[float],str, Sequence[str],vs.VideoNode, Sequence[vs.VideoNode],vs.VideoFrame, Sequence[vs.VideoFrame],Callable[..., Any], Sequence[Callable[..., Any]]]
T = TypeVar("T", bound=VideoProp)
# Cap VapourSynth's frame cache at the user-configured RAM limit (MB).
vs.core.max_cache_size = ram_limit
# Enable ANSI colour codes on Windows terminals.
colorama.init()
def FrameInfo(clip: vs.VideoNode,
              title: str,
              style: str = "sans-serif,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,""0,0,0,0,100,100,0,0,1,2,0,7,10,10,10,1",
              newlines: int = 3,
              pad_info: bool = False) -> vs.VideoNode:
    """
    FrameInfo function stolen from awsmfunc, implemented by LibreSneed
    Prints the frame number, frame type and a title on the clip

    :param clip:     Clip to annotate.
    :param title:    Static text drawn on every frame (e.g. the group name).
    :param style:    ASS subtitle style string passed to sub.Subtitle.
    :param newlines: Number of newlines used to push the title below the frame info.
    :param pad_info: If True, pad the per-frame info text down as well.
    :return:         Clip with subtitles rendered onto it.
    """
    def FrameProps(n: int, f: vs.VideoFrame, clip: vs.VideoNode, padding: Optional[str]) -> vs.VideoNode:
        # _PictType may be absent from frame props depending on the source filter
        if "_PictType" in f.props:
            info = f"Frame {n} of {clip.num_frames}\nPicture type: {f.props['_PictType'].decode()}"
        else:
            info = f"Frame {n} of {clip.num_frames}\nPicture type: N/A"
        if pad_info and padding:
            info_text = [padding + info]
        else:
            info_text = [info]
        # burn the per-frame info onto the clip
        clip = vs.core.sub.Subtitle(clip, text=info_text, style=style)
        return clip
    padding_info: Optional[str] = None
    if pad_info:
        padding_info = " " + "".join(['\n'] * newlines)
        padding_title = " " + "".join(['\n'] * (newlines + 4))
    else:
        padding_title = " " + "".join(['\n'] * newlines)
    # per-frame text must go through FrameEval; the title is constant so a single Subtitle call suffices
    clip = vs.core.std.FrameEval(clip, partial(FrameProps, clip=clip, padding=padding_info), prop_src=clip)
    clip = vs.core.sub.Subtitle(clip, text=[padding_title + title], style=style)
    return clip
def dedupe(clip: "vs.VideoNode", framelist: list, framecount: int, diff_thr: int, selected_frames: list = None, seed: int = None, motion: bool = False):
    """
    Selects frames from a list as long as they aren't too close together.

    :param clip: Clip the frames belong to; only used to convert diff_thr from seconds to frames.
    :param framelist: Detailed list of frames that has to be cut down. Consumed (popped) as frames are drawn.
    :param framecount: Number of new frames to select.
    :param diff_thr: Minimum distance between each frame (in seconds).
    :param selected_frames: Frames already chosen; new picks keep their distance from these and are appended here.
    :param seed: Seed for `random.seed()` driving the random picks.
    :param motion: If enabled, the frames will be taken from the front of the list in order, not selected randomly.
    :return: Deduped, sorted framelist.
    """
    # the default used to be a shared mutable list ([]), which leaked selections between calls
    if selected_frames is None:
        selected_frames = []
    random.seed(seed)
    # convert the minimum separation from seconds to frames
    thr = round(clip.fps_num / clip.fps_den * diff_thr)
    initial_length = len(selected_frames)
    while (len(selected_frames) - initial_length) < framecount and len(framelist) > 0:
        dupe = False
        # get random frame from framelist with removal. if motion, get first frame
        if motion:
            rand = framelist.pop(0)
        else:
            rand = framelist.pop(random.randint(0, len(framelist) - 1))
        # check if it's too close to an already selected frame
        for selected_frame in selected_frames:
            if abs(selected_frame - rand) < thr:
                dupe = True
                break
        if not dupe:
            selected_frames.append(rand)
    selected_frames.sort()
    return selected_frames
def lazylist(clip: vs.VideoNode, dark_frames: int = 25, light_frames: int = 15, motion_frames: int = 0, selected_frames: list = None, seed: int = random_seed,
             diff_thr: int = screen_separation, diff_radius: int = motion_diff_radius, dark_list: list = None, light_list: list = None, motion_list: list = None,
             save_frames: bool = False, file: str = None, files: list = None, files_info: list = None):
    """
    Generates a list of frames for comparison purposes.
    :param clip: Input clip.
    :param dark_frames: Number of dark frames.
    :param light_frames: Number of light frames.
    :param motion_frames: Number of frames with high level of motion.
    :param selected_frames: Frames already chosen; new picks keep their distance from these.
    :param seed: Seed for the random selection done in `dedupe()`.
    :param diff_thr: Minimum distance between each frame (in seconds).
    :param diff_radius: Number of frames in each direction over which motion data is averaged.
    :param dark_list: Pre-existing detailed list of dark frames that needs to be sorted.
    :param light_list: Pre-existing detailed list of light frames that needs to be sorted.
    :param motion_list: Pre-existing detailed list of high motion frames that needs to be sorted.
    :param save_frames: If true, returns detailed lists with every type of frame.
    :param file: File being analyzed.
    :param files: List of files in directory.
    :param files_info: Information for each file in directory.
    :return: List of dark, light, and high motion frames (plus the detailed lists when save_frames is true).
    """
    # mutable default replaced with None so selections don't leak between calls
    if selected_frames is None:
        selected_frames = []
    # if no frames were requested, return empty list before running algorithm
    if dark_frames + light_frames + motion_frames == 0:
        return [], dark_list, light_list, motion_list
    findex = files.index(file)
    dark = []
    light = []
    diff = []
    motion = []
    if dark_list is None or light_list is None or motion_list is None:
        def checkclip(n, f, clip):
            # bucket each frame by its average luma
            avg = f.props["PlaneStatsAverage"]
            if 0.062746 <= avg <= 0.380000:
                dark.append(n)
            elif 0.450000 <= avg <= 0.800000:
                light.append(n)
            if motion_list is None and motion_frames > 0:
                #src = mvf.Depth(clip, 5)
                gray = vstools.get_y(clip)
                gray_last = vs.core.std.BlankClip(gray)[0] + gray
                # make diff between frame and last frame, with prewitt (difference is white on black background)
                diff_clip = vs.core.std.MakeDiff(gray_last, gray)
                diff_clip = vs.core.std.Prewitt(diff_clip)
                diff_clip = diff_clip.std.PlaneStats()
                diff.append(diff_clip.get_frame(n).props["PlaneStatsAverage"])
            return clip
        s_clip = clip.std.PlaneStats()
        eval_frames = vs.core.std.FrameEval(clip, partial(checkclip, clip=s_clip), prop_src=s_clip)
        # if group name is present, display only it and color it cyan. if group name isnt present, display file name and color it yellow.
        if file is not None and files is not None and files_info is not None:
            suffix = files_info[findex].get('suffix')
            if files_info[findex].get("suffix_color") == "yellow":
                message = f'Analyzing video: [yellow]{suffix.strip()}'
            elif files_info[findex].get("suffix_color") == "cyan":
                message = f"Analyzing video: [cyan]{suffix.strip()}"
        else:
            message = "Analyzing video"
        vstools.clip_async_render(eval_frames, progress=message)
    else:
        dark = dark_list
        light = light_list
        diff = motion_list
    # remove frames that are within diff_thr seconds of other frames. for dark and light, select random frames as well
    selected_frames = dedupe(clip, dark, dark_frames, diff_thr, selected_frames, seed)
    selected_frames = dedupe(clip, light, light_frames, diff_thr, selected_frames, seed)
    # find frames with most motion
    if motion_frames > 0:
        avg_diff = []
        # get average difference over diff_radius frames in each direction
        # store frame number in avg_diff as well in the form [frame, avg_diff]
        for i, d in enumerate(diff):
            if i >= (diff_radius) and i < (clip.num_frames - diff_radius):
                if isinstance(d, float):
                    surr_frames = diff[i-diff_radius:i+diff_radius+1]
                    mean = sum(surr_frames) / len(surr_frames)
                    avg_diff.append([i, mean])
        # sort avg_diff list based on the diff values, not the frame numbers
        sorted_avg_diff = sorted(avg_diff, key=lambda x: x[1], reverse=True)
        for i in range(0, len(sorted_avg_diff)):
            motion.append(sorted_avg_diff[i][0])
        # remove frames that are too close to other frames. uses lower diff_thr because high motion frames will be different from one another
        selected_frames = dedupe(clip, motion, motion_frames, round(diff_thr/4), selected_frames, seed, motion=True)
    print()
    if save_frames:
        dark_list = dark
        light_list = light
        motion_list = diff
        return selected_frames, dark_list, light_list, motion_list
    else:
        return selected_frames
def _get_slowpics_header(content_length: str, content_type: str, sess: Session) -> Dict[str, str]:
    """
    Stolen from vardefunc, fixed by Jimbo.
    Assembles the HTTP headers slow.pics expects for an upload request,
    echoing back the XSRF token found in the session's cookie jar.
    """
    # slow.pics sets this cookie on the first request; it must be sent back as a header
    xsrf_token = sess.cookies.get_dict()["XSRF-TOKEN"]
    headers: Dict[str, str] = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.9",
        "Access-Control-Allow-Origin": "*",
        "Content-Length": content_length,
        "Content-Type": content_type,
        "Origin": "https://slow.pics/",
        "Referer": "https://slow.pics/comparison",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        "X-XSRF-TOKEN": xsrf_token,
    }
    return headers
def get_highest_res(files: List[str]) -> "tuple[int, int, int]":
    """
    Finds the video source with the highest resolution from a list of files.
    :param files: The list of files in question.
    :return: The width, height, and file index of the highest resolution video.
             For an empty list, returns (0, 0, -1) instead of raising.
    """
    height = 0
    width = 0
    # -1 means "no file found"; previously this name was unbound until a taller
    # video was seen, which raised UnboundLocalError on an empty input list
    max_res_file = -1
    for filenum, f in enumerate(files):
        video = vs.core.lsmas.LWLibavSource(f)
        if height < video.height:
            height = video.height
            width = video.width
            max_res_file = filenum
    return width, height, max_res_file
def estimate_analysis_time(file, read_len: int=15):
    """
    Estimates the time it would take to analyze a video source.
    Renders two short sections (starting at 1/3 and 2/3 of the clip) through
    PlaneStats and returns the average wall-clock time of the two renders.
    :param file: Path of the video file to probe.
    :param read_len: How many frames to read from the video.
    :return: Average elapsed time in seconds of the two test renders.
    """
    clip = vs.core.lsmas.LWLibavSource(file)
    # safeguard for if there arent enough frames in clip
    while clip.num_frames / 3 + 1 < read_len:
        read_len -= 1
    # two sample windows, at one third and two thirds into the clip
    clip1 = clip[int(clip.num_frames / 3) : int(clip.num_frames / 3) + read_len]
    clip2 = clip[int(clip.num_frames * 2 / 3) : int(clip.num_frames * 2 / 3) + read_len]
    def checkclip(n, f, clip):
        # NOTE(review): avg is unused; presumably the prop read is kept so the
        # PlaneStats frame is fully evaluated during the timing run — confirm
        avg = f.props["PlaneStatsAverage"]
        return clip
    start_time = time.time()
    vstools.clip_async_render(vs.core.std.FrameEval(clip1, partial(checkclip, clip=clip1.std.PlaneStats()), prop_src=clip1.std.PlaneStats()))
    elapsed_time = time.time() - start_time
    start_time = time.time()
    vstools.clip_async_render(vs.core.std.FrameEval(clip2, partial(checkclip, clip=clip2.std.PlaneStats()), prop_src=clip2.std.PlaneStats()))
    # average of the two timed renders
    elapsed_time = (elapsed_time + time.time() - start_time)/2
    return elapsed_time
def evaluate_analyze_clip(analyze_clip, files, files_info):
    """
    Determines which file should be analyzed by lazylist.
    :param analyze_clip: Index, group name, or file name identifying the file.
                         "" (or no unique match) falls back to the default heuristic.
    :param files: List of files in directory.
    :param files_info: Information for each file in directory.
    :return: The file that should be analyzed.
    """
    file_analysis_default = False
    first_file = None
    # check if analyze_clip is an int or string with just an int in it
    if (isinstance(analyze_clip, int) and analyze_clip >= 0) or (isinstance(analyze_clip, str) and analyze_clip.isdigit() and int(analyze_clip) >= 0):
        first_file = files[int(analyze_clip)]
    # check if analyze_clip is a group or file name
    elif isinstance(analyze_clip, str) and analyze_clip != "":
        matches = 0
        for findex, info in enumerate(files_info):
            if analyze_clip == info.get("release_group") or analyze_clip == info.get("file_name") or analyze_clip in info.get("file_name"):
                matches += 1
                first_file = files[findex]
        # if no matches found, use default
        if matches == 0:
            printwrap('No file matching the "analyze_clip" parameter has been found. Using default.')
            file_analysis_default = True
        # ambiguous name: actually fall back to the default, as the message promises
        # (previously the last match was silently used despite printing "Using default.")
        if matches > 1:
            printwrap('Too many files match the "analyze_clip" parameter. Using default.')
            file_analysis_default = True
    # if no clip specified, use default
    else:
        file_analysis_default = True
    # default: pick file with smallest read time
    if file_analysis_default:
        printwrap("Determining which file to analyze...\n")
        estimated_times = [estimate_analysis_time(file) for file in files]
        first_file = files[estimated_times.index(min(estimated_times))]
    return first_file
def init_clip(file: str, files: list, trim_dict: dict, trim_dict_end: dict, change_fps: dict = {},
              analyze_clip: str = None, files_info: list = None, return_file: bool = False):
    """
    Gets trimmed and fps modified clip from video file.
    :param file: File to load. If None, a file is chosen via analyze_clip.
    :param files: List of files in directory.
    :param trim_dict: Start trims (negative values prepend blank frames), keyed by file index.
    :param trim_dict_end: End trims, keyed by file index.
    :param change_fps: Target fps as [numerator, denominator], keyed by file index.
    :param analyze_clip: Passed to evaluate_analyze_clip when no file is given.
    :param files_info: Information for each file in directory.
    :param return_file: If true, also return the chosen filename.
    :return: The prepared clip (and the filename when return_file is true).
    """
    # evaluate analyze_clip if it hasn't been already
    # (first_file is a module-level global set up by run_comparison)
    if analyze_clip is not None and file is None and first_file is None:
        file = evaluate_analyze_clip(analyze_clip, files, files_info)
    findex = files.index(file)
    clip = vs.core.lsmas.LWLibavSource(file)
    # hoist the repeated dict lookups
    trim = trim_dict.get(findex)
    if trim is not None:
        if trim > 0:
            clip = clip[trim:]
        elif trim < 0:
            # append blank clip to beginning of source to "extend" it
            clip = vs.core.std.BlankClip(clip)[:(trim * -1)] + clip
            # keep count of how many blank frames were appended
            # NOTE(review): currently unused within this function
            extended = trim * -1
    trim_end = trim_dict_end.get(findex)
    if trim_end is not None:
        clip = clip[:trim_end]
    fps = change_fps.get(findex)
    if fps is not None:
        clip = vstools.change_fps(clip, fractions.Fraction(numerator=fps[0], denominator=fps[1]))
    if return_file:
        return clip, file
    else:
        return clip
def get_suffixes(files_info: list, first_display: bool = False):
    """
    Gets display name ('suffix') and its color for every file based on its release group and filename.
    :param files_info: List of dictionaries generated by anitopy for every file.
    :param first_display: Whether or not the suffixes are being generated for the program's initial display of found files.
    :return: List of dictionaries for every file with 'suffix' and 'suffix_color' updated.
    """
    # if group name exists use it, otherwise use file name
    for i in range(0, len(files_info)):
        if files_info[i].get('release_group') is not None:
            files_info[i]['suffix'] = str(files_info[i].get('release_group'))
            files_info[i]['suffix_color'] = "cyan"
        else:
            files_info[i]['suffix'] = files_info[i].get('file_name')
            files_info[i]['suffix_color'] = "yellow"
    # check for duplicates
    for i in range(0, len(files_info)):
        matches = [i]
        for f in range(i + 1, len(files_info)):
            if files_info[i].get('suffix') == files_info[f].get('suffix'):
                matches.append(f)
        # if duplicates found, check whether they have version number in file name and put it in suffix
        if len(matches) > 1:
            for f in (matches):
                # don't want to rely on anitopy cause i don't know what regex it uses
                '''if files_info[f].get('release_version') != None:
                files_info[f]['suffix'] = files_info[f].get('suffix') + " " + files_info[f].get('release_version')
                files_info[f]['suffix_color'] = "cyan"'''
                # scan the filename for a "v<digits>" marker by hand
                for pos, letter in enumerate(files_info[f].get('file_name')):
                    x = 0
                    if letter.lower() == "v":
                        # count how many digits follow the "v"
                        while files_info[f].get('file_name')[pos+1:pos+x+2].isdigit() and pos+x+2 <= len(files_info[f].get('file_name')):
                            x += 1
                        # if they do, add " vXX" to suffix
                        # also check that the match for "vXX" not in the file extension
                        if x > 0 and files_info[f].get('file_name')[pos+1:pos+x+2] not in os.path.splitext(files_info[f].get('file_name'))[1]:
                            files_info[f]['suffix'] = files_info[f].get('suffix') + " " + files_info[f].get('file_name')[pos:pos+x+1]
                            files_info[f]['suffix_color'] = "cyan"
                            break
    # check for duplicates again and just set filename this time
    for i in range(0, len(files_info)):
        matches = [i]
        for f in range(i + 1, len(files_info)):
            if files_info[i].get('suffix') == files_info[f].get('suffix'):
                matches.append(f)
        if len(matches) > 1:
            for f in (matches):
                files_info[f]['suffix'] = files_info[f].get('file_name')
                files_info[f]['suffix_color'] = "yellow"
    # if it's not the first display, only show file name up until there's a difference with another file name
    if not first_display:
        for i in range(0, len(files_info)):
            highest = 0
            highest_file = 0
            filename = files_info[i].get('file_name')
            if files_info[i].get('suffix') == filename:
                # find the other filename sharing the longest common prefix with this one
                for f in range(0, len(files_info)):
                    pos = 0
                    if i == f:
                        continue
                    # NOTE(review): if one filename is a strict prefix of another,
                    # this comparison could run past the shorter string — confirm
                    while files_info[i].get('file_name')[pos] == files_info[f].get('file_name')[pos]:
                        pos += 1
                    if pos > highest:
                        highest = pos
                        highest_file = f
                # progress bar should take up about half the screen, at least 2/5 of that will be used, max all of it
                # original: l_bound = 20, h_bound = 45
                consolesize = os.get_terminal_size().columns
                progress = min(round(consolesize / 2), 68)
                l_bound = round((consolesize - progress) * 2/5)
                h_bound = consolesize - progress
                # show whole filename if it fits within limit
                if len(filename) < (h_bound):
                    pass
                # put "..." at the end if the different part appears within limit
                elif highest < h_bound-3:
                    files_info[i]['suffix'] = filename[:h_bound-3].strip() + "..."
                # if section thats different starts less than "l_bound" chars away from end, put "..." in middle of name, with diff following it
                elif len(filename[highest+1:]) <= l_bound:
                    files_info[i]['suffix'] = filename[:h_bound-3-len(filename[highest+1:])].strip() + "..." + filename[highest+1:].strip()
                # if section thats different starts more than "l_bound" chars away from end, put "..." then diff in parentheses
                else:
                    # locate the last position where the two names still differ
                    for pos, letter in enumerate(filename):
                        if pos >= len(files_info[highest_file].get('file_name')):
                            break
                        if letter != files_info[highest_file].get('file_name')[pos]:
                            last_diff_pos = pos
                    if last_diff_pos + 1 == len(files_info[highest_file].get('file_name')):
                        diff = filename[highest:]
                    else:
                        diff = filename[highest:last_diff_pos+1]
                    # if all of the diff fits
                    if len(diff) < (h_bound-l_bound-6):
                        files_info[i]['suffix'] = filename[:l_bound].strip() + "... (" + diff.strip() + ")"
                    # if only some of the diff fits
                    else:
                        files_info[i]['suffix'] = filename[:l_bound].strip() + "... (" + diff[:h_bound-l_bound-6].strip() + ")"
    return files_info
def str_to_number(string: str):
    """
    Converts a string to a float or int if possible.
    Integers are preferred; numeric strings that only parse as float
    (e.g. "2.5", "2.0") come back as float. Non-numeric strings are
    returned unchanged.
    :param string: String to convert.
    :return: int, float, or the original string.
    """
    # only ValueError is expected from failed parses; the previous bare
    # except also swallowed unrelated errors
    try:
        float(string)
    except ValueError:
        return string
    try:
        return int(string)
    except ValueError:
        return float(string)
def extend_clip(clip: "vs.VideoNode", frames: list):
    """
    If a clip is shorter than the largest frame that needs to be rendered,
    extend it by repeating a trailing blank frame.
    :param clip: Clip to check.
    :param frames: Sorted list of frame numbers to be rendered; only the last (largest) is used.
    :return: The clip, extended when necessary.
    """
    # rendering frame number f requires num_frames >= f + 1, so the clip must
    # also be extended when num_frames == frames[-1] (the old "<" missed that
    # boundary case by one frame)
    if clip.num_frames <= frames[-1]:
        clip = clip + (vs.core.std.BlankClip(clip)[0] * (frames[-1] - clip.num_frames + 1))
    return clip
def printwrap(text: str, width: int = None, end: str = "\n", *args, **kwargs):
    """
    Prints text with smart wrapping using textwrap.fill().
    :param text: Text to wrap and display.
    :param width: Width of wrapping area; defaults to the terminal's current width.
    :param end: Standard param passed on to print().
    Also passes along extra args to textwrap.fill().
    """
    if width is None:
        # resolved per call rather than once at import, and shutil's variant
        # falls back to 80 columns instead of raising OSError when stdout is
        # not attached to a real terminal (e.g. piped output)
        width = shutil.get_terminal_size().columns
    print(textwrap.fill(text, width, *args, **kwargs), end=end)
def run_comparison():
#START_TIME = time.time()
global first_file
first_file = None
#first file is only determined by analyze_clip if it is called
supported_extensions = ('.mkv', '.m2ts', '.mp4', '.webm', '.ogm', '.mpg', '.vob', '.iso', '.ts', '.mts', '.mov', '.qv', '.yuv',
'.flv', '.avi', '.rm', '.rmvb', '.m2v', '.m4v', '.mp2', '.mpeg', '.mpe', '.mpv', '.wmv', '.avc', '.hevc',
'.264', '.265', '.av1')
#find video files in the current directory, and exit if there are fewer than two
files = [file for file in os.listdir('.') if file.lower().endswith(supported_extensions)]
files = os_sorted(files)
file_count = len(files)
if file_count < 2:
sys.exit("Error: Fewer than 2 video files found in directory.")
#use anitopy library to get dictionary of show name, episode number, episode title, release group, etc
files_info = []
for file in files:
files_info.append(ani.parse(file))
anime_title = ""
anime_episode_number = ""
anime_episode_title = ""
#get anime title, episode number, and episode title
for dict in files_info:
if dict.get('anime_title') is not None and anime_title == "":
anime_title = dict.get('anime_title')
if dict.get('episode_number') is not None and anime_episode_number == "":
anime_episode_number = dict.get('episode_number')
if dict.get('episode_title') is not None and anime_episode_title == "":
anime_episode_title = dict.get('episode_title')
#what to name slow.pics collection
if anime_title != "" and anime_episode_number != "":
collection_name = anime_title.strip() + " - " + anime_episode_number.strip()
elif anime_title != "":
collection_name = anime_title.strip()
elif anime_episode_title != "":
collection_name = anime_episode_title.strip()
else:
collection_name = files_info[0].get('file_name')
collection_name = re.sub(r"\[.*?\]|\(.*?\}|\{.*?\}|\.[^.]+$", "", collection_name).strip()
#if anime title still isn't found, give it collection name
if anime_title == "":
anime_title = collection_name
#replace group or file names in trim_dict with file index
for d in [trim_dict, trim_dict_end, change_fps]:
for i in list(d):
if isinstance(i, str):
found = False
for dict in files_info:
if i == dict.get("release_group") or i == dict.get("file_name"): # or i in dict.get("file_name")
d[files_info.index(dict)] = d[i]
found = True
if found:
d.pop(i)
#detects and sets up change_fps "set" feature
if (list(change_fps.values())).count("set") > 0:
if (list(change_fps.values())).count("set") > 1:
sys.exit('Error: More than one change_fps file using "set".')
#if "set" is found, get the index of its file, get its fps, and set every other unspecified file to that fps
findex = list(change_fps.keys())[list(change_fps.values()).index("set")]
del change_fps[findex]
file = files[findex]
temp_clip = vs.core.lsmas.LWLibavSource(file)
fps = [temp_clip.fps_num, temp_clip.fps_den]
for i in range(0, len(files)):
if i not in change_fps:
change_fps[i] = fps
#if file is already set to certain fps, remove it from change_fps
for findex, file in enumerate(files):
temp_clip = init_clip(file, files, trim_dict, trim_dict_end)
if change_fps.get(findex) is not None:
if not isinstance(change_fps.get(findex), list):
sys.exit("Error: change_fps parameter only accepts lists as input")
if temp_clip.fps_num / temp_clip.fps_den == change_fps.get(findex)[0] / change_fps.get(findex)[1]:
del change_fps[findex]
#get display version of suffixes
get_suffixes(files_info, first_display=True)

def _highlight(text, token):
    #wrap the first occurrence of token in cyan, returning to yellow afterwards.
    #partition() splits only on the first occurrence, so (unlike the previous
    #split() without maxsplit) a token appearing twice in the filename no longer
    #truncates the displayed name; a missing token returns the text unchanged
    #instead of raising IndexError
    before, sep, after = text.partition(token)
    if not sep:
        return text
    return before + colorama.Fore.CYAN + token + colorama.Fore.YELLOW + after

#print list of files
print('\nFiles found: ')
for findex, file in enumerate(files):
    groupname = files_info[findex].get("suffix")
    release_group = files_info[findex].get("release_group")
    display_name = files_info[findex].get("file_name")
    if release_group is not None:
        #if group name is found, highlight
        if groupname == release_group:
            filename = _highlight(display_name, groupname)
        #if group name with version number is found, highlight both group and version
        elif (release_group + " v") in groupname:
            v = groupname.rindex("v")
            filename = _highlight(display_name, groupname[:v - 1])
            filename = _highlight(filename, groupname[v:])
        #if suffix is filename but group name found, still highlight
        elif release_group in groupname:
            filename = _highlight(display_name, release_group)
        #if no group name is found, dont highlight
        else:
            filename = groupname
    else:
        #bugfix: filename was previously left unassigned when no release group
        #was detected, raising NameError on the first such file (or silently
        #reusing the previous file's highlighted name)
        filename = groupname
    #output filenames
    printwrap(colorama.Fore.YELLOW + " - " + filename + colorama.Style.RESET_ALL, subsequent_indent=" ")
    #output which files will be trimmed
    trim = trim_dict.get(findex)
    if trim is not None:
        if trim >= 0:
            printwrap(f" - Trimmed to start at frame {trim}", subsequent_indent=" ")
        elif trim < 0:
            printwrap(f" - {(trim * -1)} frame(s) appended at start", subsequent_indent=" ")
    trim_end = trim_dict_end.get(findex)
    if trim_end is not None:
        if trim_end >= 0:
            printwrap(f" - Trimmed to end at frame {trim_end}", subsequent_indent=" ")
        elif trim_end < 0:
            printwrap(f" - Trimmed to end {trim_end * -1} frame(s) early", subsequent_indent=" ")
    fps_pair = change_fps.get(findex)
    if fps_pair is not None:
        printwrap(f" - FPS changed to {fps_pair[0]}/{fps_pair[1]}", subsequent_indent=" ")
print()
#get version of suffixes that will be used in the rest of the file
get_suffixes(files_info, first_display=False)
#check if conflicting options are enabled
if upscale and single_res > 0:
    sys.exit("Error: Can't use 'upscale' and 'single_res' functions at the same time.")
#seed the selection with any frames the user specified explicitly
frames = list(user_frames)
#if save_frames is enabled, store generated brightness data in a text file, so they don't have to be analyzed again
if save_frames and (frame_count_dark + frame_count_bright + frame_count_motion) > 0:
    #mismatch flags that the cached analysis no longer matches the current parameters
    mismatch = False
    #if frame file exists, read from it
    if os.path.exists(frame_filename) and os.stat(frame_filename).st_size > 0:
        printwrap(f'Reading data from "{frame_filename}"...')
        with open(frame_filename) as frame_file:
            generated_frames = frame_file.readlines()
        #turn numbers into floats or ints, and get rid of newlines
        for i, v in enumerate(generated_frames):
            v = v.strip()
            generated_frames[i] = str_to_number(v)
        #slice the flat cache into its labeled sections; the layout matches what
        #the writer branch below emits (one value per line under each "label:")
        dark_list = generated_frames[generated_frames.index("dark:")+1:generated_frames.index("bright:")]
        light_list = generated_frames[generated_frames.index("bright:")+1:generated_frames.index("motion:")]
        motion_list = generated_frames[generated_frames.index("motion:")+1:]
        analyzed_file = generated_frames[generated_frames.index("analyzed_file:") + 1]
        #NOTE(review): assumes ani.parse always yields a release_group for the cached
        #file; .lower() below would raise AttributeError if it is None — confirm
        analyzed_group = ani.parse(analyzed_file).get("release_group")
        #trim start/end recorded when the cache was written
        file_trim = generated_frames[generated_frames.index("analyzed_file_trim:") + 1]
        file_trim_end = generated_frames[generated_frames.index("analyzed_file_trim:") + 2]
        #fps numerator/denominator recorded when the cache was written
        file_fps_num = generated_frames[generated_frames.index("analyzed_file_fps:") + 1]
        file_fps_den = generated_frames[generated_frames.index("analyzed_file_fps:") + 2]
        #check if a file with the same group name as the analyzed file is present in our current directory
        group_found = False
        for i, dict in enumerate(files_info):  #NOTE(review): 'dict' shadows the builtin
            if dict.get("release_group") is not None:
                if dict.get("release_group").lower() == analyzed_group.lower():
                    group_found = True
                    group_file_index = files.index(dict.get("file_name"))
        #if file wasn't found but group name was, set file with the same group name
        if analyzed_file not in files and group_found is True:
            analyzed_file = files[group_file_index]
        #check if show name, episode number, or the release which was analyzed has changed
        if (generated_frames[generated_frames.index("show_name:") + 1] != anime_title
        or generated_frames[generated_frames.index("episode_num:") + 1] != int(anime_episode_number)
        or analyzed_file not in files):
            mismatch = True
        #check if trim for analyzed file has changed
        if mismatch == False:
            found_trim = 0
            found_trim_end = 0
            if files.index(analyzed_file) in trim_dict:
                found_trim = trim_dict.get(files.index(analyzed_file))
            if files.index(analyzed_file) in trim_dict_end:
                found_trim_end = trim_dict_end.get(files.index(analyzed_file))
            if (file_trim != found_trim
            or file_trim_end != found_trim_end):
                mismatch = True
        #check if fps of analyzed file has changed
        if mismatch == False:
            temp_clip = init_clip(analyzed_file, files, trim_dict, trim_dict_end, change_fps)
            #NOTE(review): float-division comparison; exact for typical fps pairs but
            #cross-multiplying would be safer against rounding
            if file_fps_num / file_fps_den != temp_clip.fps_num / temp_clip.fps_den:
                mismatch = True
        #if mismatch is detected, re-analyze frames
        if mismatch:
            printwrap("\nParameters have changed. Will re-analyze brightness data.\n")
            os.remove(frame_filename)
        #only spend time processing lazylist if we need to
        elif (frame_count_dark + frame_count_bright + frame_count_motion) > 0:
            clip = init_clip(files[0], files, trim_dict, trim_dict_end, change_fps, analyze_clip, files_info)
            #reuse the cached dark/bright/motion data instead of re-analyzing the clip
            frames.extend(lazylist(clip, frame_count_dark, frame_count_bright, frame_count_motion, frames, dark_list=dark_list, light_list=light_list, motion_list=motion_list, file=files[0], files=files, files_info=files_info))
    #if frame file does not exist or has less frames than specified, write to it
    if not os.path.exists(frame_filename) or os.stat(frame_filename).st_size == 0 or mismatch:
        #if this is the first time first_file is being called, it will be evaluated. if not, it will already be known, since it's a global variable
        first, first_file = init_clip(first_file, files, trim_dict, trim_dict_end, change_fps, analyze_clip, files_info, return_file=True)
        #get the trim
        first_trim = 0
        first_trim_end = 0
        if files.index(first_file) in trim_dict:
            first_trim = trim_dict[files.index(first_file)]
        if files.index(first_file) in trim_dict_end:
            first_trim_end = trim_dict_end[files.index(first_file)]
        #full analysis: lazylist returns the picked frames plus the raw brightness/motion lists to cache
        frames_temp, dark_list, light_list, motion_list = lazylist(first, frame_count_dark, frame_count_bright, frame_count_motion, frames, save_frames=True, file=first_file, files=files, files_info=files_info)
        frames.extend(frames_temp)
        #write the cache in the labeled-section layout the reader branch above expects
        with open(frame_filename, 'w') as frame_file:
            frame_file.write(f"show_name:\n{anime_title}\nepisode_num:\n{anime_episode_number}\nanalyzed_file:\n{first_file}\nanalyzed_file_trim:\n{first_trim}\n{first_trim_end}\nanalyzed_file_fps:\n{first.fps_num}\n{first.fps_den}\ndark:\n")
            for val in dark_list:
                frame_file.write(f"{val}\n")
            frame_file.write("bright:\n")
            for val in light_list:
                frame_file.write(f"{val}\n")
            frame_file.write("motion:\n")
            for val in motion_list:
                frame_file.write(f"{val}\n")
#if save_frames isn't enabled, run lazylist
elif (frame_count_dark + frame_count_bright + frame_count_motion) > 0:
    first, first_file = init_clip(first_file, files, trim_dict, trim_dict_end, change_fps, analyze_clip, files_info, return_file=True)
    frames.extend(lazylist(first, frame_count_dark, frame_count_bright, frame_count_motion, frames, file=first_file, files=files, files_info=files_info))
if random_frames > 0:
    print("Getting random frames...\n")
    #initialize the reference clip once; it was previously initialized twice in
    #a row with identical arguments (once for range(), once for dedupe())
    base_clip = init_clip(files[0], files, trim_dict, trim_dict_end, change_fps)
    #get list of all frames in clip
    frame_ranges = list(range(0, base_clip.num_frames))
    #randomly selects frames at least screen_separation seconds apart
    frame_ranges = dedupe(base_clip, frame_ranges, random_frames, screen_separation, frames)
    frames.extend(frame_ranges)
#remove dupes and sort
frames = sorted(set(frames))
#if no frames selected, terminate program
if len(frames) == 0:
    sys.exit("Error: No frames have been selected, unable to proceed.")
#print comma separated list of which frames have been selected
print(f"Selected {len(frames)} frames:")
#str.join replaces the old manual first-iteration-flag concatenation loop
#(the loop's `first`/`message` temporaries were local to this printout)
printwrap(", ".join(str(f) for f in frames), end="\n\n")
if upscale:
    max_width, max_height, max_res_file = get_highest_res(files)
#create screenshot directory, if one already exists delete it first
screen_dir = pathlib.Path("./" + screen_dirname + "/")
if screen_dir.is_dir():
    shutil.rmtree(screen_dir)
screen_dir.mkdir()
#check if ffmpeg is available. if not, run script with ffmpeg disabled
global ffmpeg
if ffmpeg:
    try:
        subprocess.run(["ffmpeg", "-version"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    #bugfix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    #SystemExit. OSError covers a missing/unrunnable binary (FileNotFoundError),
    #SubprocessError covers a non-zero exit (CalledProcessError from check=True)
    except (OSError, subprocess.SubprocessError):
        ffmpeg = False
        printwrap("FFmpeg was not found. Continuing to generate screens without it.")
print("Generating screenshots:")
#initialize progress bar, specify information to be output
#would use expand=True but the lazylist progress bar doesn't so i'd rather go for consistency
with Progress(TextColumn("{task.description}"), BarColumn(), TextColumn("{task.completed}/{task.total}"), TextColumn("{task.percentage:>3.02f}%"), TimeRemainingColumn()) as progress:
total_gen_progress = progress.add_task("[green]Total", total=len(frames) * len(files))
file_gen_progress = progress.add_task("", total=len(frames), visible=0)
for file in files:
findex = files.index(file)
clip = init_clip(file, files, trim_dict, trim_dict_end, change_fps)
#extend clip if a frame is out of range
clip = extend_clip(clip, frames)
#get release group or filename of file