diff --git a/README_us-EN.md b/README_us-EN.md
index 20177d88..fbc77553 100644
--- a/README_us-EN.md
+++ b/README_us-EN.md
@@ -38,6 +38,7 @@ English | [简体中文](README_zh-CN.md)
- Segment Anything Model
- [SAM](https://arxiv.org/abs/2304.02643): Universal model for natural image segmentation;
- [MobileSAM](https://arxiv.org/abs/2306.14289): Faster version of `SAM`;
+  - [SAM-Med2D](https://github.com/OpenGVLab/SAM-Med2D): Universal model for 2D medical image segmentation (🤗);
- [MedSAM](https://arxiv.org/abs/2304.12306): Universal model for medical image segmentation;
- [LVMSAM](https://arxiv.org/abs/2306.11925)
- [BUID](https://github.com/CVHub520/X-AnyLabeling/tree/main/assets/examples/buid): Ultrasound breast cancer segmentation model;
@@ -65,7 +66,7 @@ English | [简体中文](README_zh-CN.md)
### 1.1 Executable
-- Download and run the `GUI` version directly from [Baidu Cloud (Access Code: bkdj)](https://pan.baidu.com/s/1cJeRE2wdiYDy05pb5_JqYQ?pwd=bkdj).
+- Download and run the `GUI` version directly from [Release v0.2.1](https://github.com/CVHub520/X-AnyLabeling/releases/tag/v0.2.1).
Note:
- For MacOS:
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 46b0a104..9713f5e0 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -14,7 +14,7 @@
-[帮助文档](./docs/Q&A.md)
+👉[帮助文档](./docs/Q&A.md)👈
简体中文 | [English](README_us-EN.md)
@@ -41,6 +41,7 @@
- Segment Anything Model
- [SAM](https://arxiv.org/abs/2304.02643): 通用自然图像分割一切模型;
- [MobileSAM](https://arxiv.org/abs/2306.14289): 快速版 `SAM`;
+ - [SAM-Med2D](https://github.com/OpenGVLab/SAM-Med2D): 2D医学图像分割一切模型(🤗推荐);
- [MedSAM](https://arxiv.org/abs/2304.12306): 通用医学图像分割一切模型;
- [LVMSAM](https://arxiv.org/abs/2306.11925)
- [BUID](https://github.com/CVHub520/X-AnyLabeling/tree/main/assets/examples/buid): 超声乳腺癌分割模型;
@@ -69,7 +70,7 @@
### 1.1 可执行文件
-- 从[百度网盘(提取码: bkdj)](https://pan.baidu.com/s/1cJeRE2wdiYDy05pb5_JqYQ?pwd=bkdj)下载并运行`GUI`版本直接运行。
+- 从[百度网盘(提取码: cn0c)](https://pan.baidu.com/s/1HeEoVc-_DI5EuSU3MipqJQ?pwd=cn0c)下载`GUI`版本并直接运行。
注意:
- 对于MacOS:
diff --git a/anylabeling/app_info.py b/anylabeling/app_info.py
index 2c5dfd6c..376916d9 100644
--- a/anylabeling/app_info.py
+++ b/anylabeling/app_info.py
@@ -1,4 +1,4 @@
__appname__ = "X-AnyLabeling"
__appdescription__ = "Advanced Auto Labeling Solution with Added Features"
-__version__ = "0.2.0"
+__version__ = "0.2.1"
__preferred_device__ = "CPU" # GPU or CPU
diff --git a/anylabeling/configs/auto_labeling/models.yaml b/anylabeling/configs/auto_labeling/models.yaml
index 82c96c0b..58cb8982 100644
--- a/anylabeling/configs/auto_labeling/models.yaml
+++ b/anylabeling/configs/auto_labeling/models.yaml
@@ -1,3 +1,5 @@
+- model_name: "sam_med2d_vit_b-r20230901"
+ config_file: ":/sam_med2d_vit_b.yaml"
- model_name: "segment_anything_vit_b_quant-r20230810"
config_file: ":/segment_anything_vit_b_quant.yaml"
- model_name: "segment_anything_vit_b-r20230810"
@@ -28,6 +30,14 @@
config_file: ":/medsam_vit_b_quant.yaml"
- model_name: "mobile_sam_vit_h-r20230810"
config_file: ":/mobile_sam_vit_h.yaml"
+- model_name: "clrnet_tusimple_r18-r20230901"
+ config_file: ":/clrnet_tusimple_r18.yaml"
+- model_name: "yolox_l_dwpose_ucoco-r20230820"
+ config_file: ":/yolox_l_dwpose_ucoco.yaml"
+- model_name: "rtdetr_r50-r20230520"
+ config_file: ":/rtdetr_r50.yaml"
+- model_name: "yolov5s_resnet50-r20230520"
+ config_file: ":/yolov5s_resnet50.yaml"
- model_name: "yolov5n-r20230520"
config_file: ":/yolov5n.yaml"
- model_name: "yolov5s-r20230520"
@@ -81,12 +91,4 @@
- model_name: "yolo-nas-m-r20230615"
config_file: ":/yolo_nas_m.yaml"
- model_name: "yolo-nas-l-r20230615"
- config_file: ":/yolo_nas_l.yaml"
-- model_name: "yolov5s_resnet50-r20230520"
- config_file: ":/yolov5s_resnet50.yaml"
-- model_name: "rtdetr_r50-r20230520"
- config_file: ":/rtdetr_r50.yaml"
-- model_name: "yolox_l_dwpose_ucoco-r20230820"
- config_file: ":/yolox_l_dwpose_ucoco.yaml"
-- model_name: "clrnet_tusimple_r18-r20230901"
- config_file: ":/clrnet_tusimple_r18.yaml"
\ No newline at end of file
+ config_file: ":/yolo_nas_l.yaml"
\ No newline at end of file
diff --git a/anylabeling/configs/auto_labeling/sam_med2d_vit_b.yaml b/anylabeling/configs/auto_labeling/sam_med2d_vit_b.yaml
new file mode 100644
index 00000000..97cd545b
--- /dev/null
+++ b/anylabeling/configs/auto_labeling/sam_med2d_vit_b.yaml
@@ -0,0 +1,7 @@
+type: sam_med2d
+name: sam_med2d_vit_b-r20230901
+display_name: SAM-Med2D-256x (ViT-Base)
+encoder_model_path: https://github.com/CVHub520/X-AnyLabeling/releases/download/v0.2.0/sam-med2d_b.encoder.onnx
+decoder_model_path: https://github.com/CVHub520/X-AnyLabeling/releases/download/v0.2.0/sam-med2d_b.decoder.onnx
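+# Spatial resolution of this SAM-Med2D export; the encoder takes 256x256 input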
+input_size: 256
diff --git a/anylabeling/services/auto_labeling/model.py b/anylabeling/services/auto_labeling/model.py
index 74e17663..112953b4 100644
--- a/anylabeling/services/auto_labeling/model.py
+++ b/anylabeling/services/auto_labeling/model.py
@@ -30,7 +30,7 @@
class Model(QObject):
BASE_DOWNLOAD_URL = (
- "https://github.com/CVHub520/X-AnyLabeling/releases/tag/v0.2.0"
+ "https://github.com/CVHub520/X-AnyLabeling/releases/tag/v0.2.1"
)
class Meta(QObject):
diff --git a/anylabeling/services/auto_labeling/model_manager.py b/anylabeling/services/auto_labeling/model_manager.py
index f222be15..6f132c1c 100644
--- a/anylabeling/services/auto_labeling/model_manager.py
+++ b/anylabeling/services/auto_labeling/model_manager.py
@@ -165,7 +165,8 @@ def load_custom_model(self, config_file):
or "name" not in model_config
or model_config["type"]
not in [
- "segment_anything",
+ "segment_anything",
+ "sam_med2d",
"yolov5",
"yolov6",
"yolov7",
@@ -455,7 +456,30 @@ def _load_model(self, model_id):
)
)
return
+ # Request next files for prediction
+ self.request_next_files_requested.emit()
+ elif model_config["type"] == "sam_med2d":
+ from .sam_med2d import SAM_Med2D
+ try:
+ model_config["model"] = SAM_Med2D(
+ model_config, on_message=self.new_model_status.emit
+ )
+ self.auto_segmentation_model_selected.emit()
+ except Exception as e: # noqa
+ print(
+ "Error in loading model: {error_message}".format(
+ error_message=str(e)
+ )
+ )
+ self.new_model_status.emit(
+ self.tr(
+ "Error in loading model: {error_message}".format(
+ error_message=str(e)
+ )
+ )
+ )
+ return
# Request next files for prediction
self.request_next_files_requested.emit()
elif model_config["type"] == "yolov5_cls":
@@ -580,7 +604,7 @@ def set_auto_labeling_marks(self, marks):
"""
if (
self.loaded_model_config is None
- or self.loaded_model_config["type"] != "segment_anything"
+ or self.loaded_model_config["type"] not in ["segment_anything", "sam_med2d"]
):
return
self.loaded_model_config["model"].set_auto_labeling_marks(marks)
@@ -666,8 +690,8 @@ def on_next_files_changed(self, next_files):
if self.loaded_model_config is None:
return
- # Currently only segment_anything model supports this feature
- if self.loaded_model_config["type"] != "segment_anything":
+        # Currently only segment_anything-like models support this feature
+ if self.loaded_model_config["type"] not in ["segment_anything", "sam_med2d"]:
return
self.loaded_model_config["model"].on_next_files_changed(next_files)
diff --git a/anylabeling/services/auto_labeling/sam_med2d.py b/anylabeling/services/auto_labeling/sam_med2d.py
new file mode 100644
index 00000000..c68e6717
--- /dev/null
+++ b/anylabeling/services/auto_labeling/sam_med2d.py
@@ -0,0 +1,459 @@
+import logging
+import os
+import traceback
+
+import cv2
+import numpy as np
+import onnxruntime as ort
+from copy import deepcopy
+from PyQt5 import QtCore
+from PyQt5.QtCore import QThread
+from PyQt5.QtCore import QCoreApplication
+
+from anylabeling.utils import GenericWorker
+from anylabeling.views.labeling.shape import Shape
+from anylabeling.views.labeling.utils.opencv import qt_img_to_rgb_cv_img
+
+from .lru_cache import LRUCache
+from .model import Model
+from .types import AutoLabelingResult
+
+class SegmentAnythingONNX:
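+    """Thin ONNX Runtime wrapper around the SAM-Med2D encoder/decoder pair.
+
+    A minimal usage sketch (paths and the prompt are illustrative):
+
+        model = SegmentAnythingONNX("encoder.onnx", "decoder.onnx")
+        embedding = model.encode(rgb_image)  # RGB image as an HxWx3 array
+        masks = model.predict_masks(
+            embedding, [{"type": "point", "data": [120, 80], "label": 1}]
+        )
+    """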
+
+ def __init__(self, encoder_model_path, decoder_model_path) -> None:
+        # Basic hyper-parameters: per-channel normalization constants
+ self.pixel_mean = np.array([123.675, 116.28, 103.53])
+ self.pixel_std = np.array([58.395, 57.12, 57.375])
+
+ # Load models
+ providers = ort.get_available_providers()
+
+ # Pop TensorRT Runtime due to crashing issues
+ # TODO: Add back when TensorRT backend is stable
+ providers = [p for p in providers if p != "TensorrtExecutionProvider"]
+
+ if providers:
+ logging.info(
+ "Available providers for ONNXRuntime: %s", ", ".join(providers)
+ )
+ else:
+ logging.warning("No available providers for ONNXRuntime")
+
+ self.encoder_session = ort.InferenceSession(encoder_model_path, providers=providers)
+ self.decoder_session = ort.InferenceSession(decoder_model_path, providers=providers)
+
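+        # The encoder takes NCHW input, so shape[-2:] gives the expected
+        # (height, width), which is (256, 256) for this SAM-Med2D export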
+ self.encoder_input_name = self.encoder_session.get_inputs()[0].name
+ self.encoder_input_shape = self.encoder_session.get_inputs()[0].shape
+ self.encoder_input_size = self.encoder_input_shape[-2:]
+
+ def run_encoder(self, encoder_inputs):
+ """Run encoder"""
+ output = self.encoder_session.run(None, encoder_inputs)
+ image_embedding = output[0]
+ return image_embedding
+
+ def transform(self, input_image: np.ndarray) -> np.ndarray:
+ """image transform
+
+ This function can convert the input image to the required input format for vit.
+
+ Args:
+ input_image (np.ndarray): input image, the image type should be RGB.
+
+ Returns:
+ np.ndarray: transformed image.
+ """
+ # Normalization
+ input_image = (input_image - self.pixel_mean) / self.pixel_std
+
+        # Resize to the encoder input size (square, so (h, w) vs. (w, h) ordering is moot)
+        input_image = cv2.resize(input_image, self.encoder_input_size, interpolation=cv2.INTER_NEAREST)
+
+ # HWC -> CHW
+ input_image = input_image.transpose((2, 0, 1))
+
+ # CHW -> NCHW
+ input_image = np.expand_dims(input_image, 0).astype(np.float32)
+
+ return input_image
+
+ def encode(self, cv_image):
+ """
+ Calculate embedding and metadata for a single image.
+ """
+ original_size = cv_image.shape[:2]
+ encoder_inputs = {
+ self.encoder_input_name: self.transform(cv_image),
+ }
+
+ image_embedding = self.run_encoder(encoder_inputs)
+ return {
+ "image_embedding": image_embedding,
+ "original_size": original_size,
+ }
+
+ def get_input_points(self, prompt):
+ """Get input points"""
+ points = []
+ labels = []
+ for mark in prompt:
+ if mark["type"] == "point":
+ points.append(mark["data"])
+ labels.append(mark["label"])
+ elif mark["type"] == "rectangle":
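+                # Following SAM's prompt encoding, a box becomes two corner
+                # points labeled 2 (top-left) and 3 (bottom-right)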
+ points.append([mark["data"][0], mark["data"][1]]) # top left
+ points.append([mark["data"][2], mark["data"][3]]) # bottom right
+ labels.append(2)
+ labels.append(3)
+ points, labels = np.array(points).astype(np.float32), np.array(labels).astype(np.float32)
+ return points, labels
+
+ def apply_coords(self, coords, original_size, new_size):
+ old_h, old_w = original_size
+ new_h, new_w = new_size
+ coords = deepcopy(coords).astype(float)
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
+ return coords
+
+ def run_decoder(self, image_embedding, original_size, prompt):
+ """Run decoder"""
+ point_coords, point_labels = self.get_input_points(prompt)
+
+        if point_coords.size == 0:
+            raise ValueError("Unable to segment, please input at least one box or point.")
+
+ h, w = self.encoder_input_size
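+        # The decoder expects a 256-channel embedding at 1/16 of the encoder
+        # input resolution, i.e. (1, 256, 16, 16) for a 256x256 input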
+ if image_embedding.shape != (1, 256, int(h/16), int(w/16)):
+ raise ValueError("Got wrong embedding shape!")
+
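+        # No prior low-res mask is provided: has_mask_input stays 0 and
+        # mask_input is all zeros at one quarter of the encoder resolution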
+ has_mask_input = np.zeros(1, dtype=np.float32)
+ mask_input = np.zeros((1, 1, int(h/4), int(w/4)), dtype=np.float32)
+
+ if point_coords is not None:
+ if isinstance(point_coords, list):
+ point_coords = np.array(point_coords, dtype=np.float32)
+ if isinstance(point_labels, list):
+ point_labels = np.array(point_labels, dtype=np.float32)
+
+ if point_coords is not None:
+ point_coords = self.apply_coords(point_coords, original_size, self.encoder_input_size).astype(np.float32)
+ point_coords = np.expand_dims(point_coords, axis=0)
+ point_labels = np.expand_dims(point_labels, axis=0)
+
+ assert point_coords.shape[0] == 1 and point_coords.shape[-1] == 2
+ assert point_labels.shape[0] == 1
+ input_dict = {"image_embeddings": image_embedding,
+ "point_coords": point_coords,
+ "point_labels": point_labels,
+ "mask_input": mask_input,
+ "has_mask_input": has_mask_input,
+ "orig_im_size": np.array(original_size, dtype=np.float32)}
+ masks, _, _ = self.decoder_session.run(None, input_dict)
+
+ return masks
+
+ def predict_masks(self, embedding, prompt):
+ """
+ Predict masks for a single image.
+ """
+ masks = self.run_decoder(
+ embedding["image_embedding"],
+ embedding["original_size"],
+ prompt,
+ )
+
+ return masks
+
+class SAM_Med2D(Model):
+ """Segmentation model using SAM_Med2D"""
+
+ class Meta:
+ required_config_names = [
+ "type",
+ "name",
+ "display_name",
+ "encoder_model_path",
+ "decoder_model_path",
+ ]
+ widgets = [
+ "output_label",
+ "output_select_combobox",
+ "button_add_point",
+ "button_remove_point",
+ "button_add_rect",
+ "button_clear",
+ "button_finish_object",
+ ]
+ output_modes = {
+ "polygon": QCoreApplication.translate("Model", "Polygon"),
+ "rectangle": QCoreApplication.translate("Model", "Rectangle"),
+ }
+ default_output_mode = "polygon"
+
+ def __init__(self, config_path, on_message) -> None:
+ # Run the parent class's init method
+ super().__init__(config_path, on_message)
+ self.input_size = self.config["input_size"]
+
+ # Get encoder and decoder model paths
+ encoder_model_abs_path = self.get_model_abs_path(
+ self.config, "encoder_model_path"
+ )
+ if not encoder_model_abs_path or not os.path.isfile(
+ encoder_model_abs_path
+ ):
+ raise FileNotFoundError(
+ QCoreApplication.translate(
+ "Model",
+ "Could not download or initialize encoder of Segment Anything.",
+ )
+ )
+ decoder_model_abs_path = self.get_model_abs_path(
+ self.config, "decoder_model_path"
+ )
+ if not decoder_model_abs_path or not os.path.isfile(
+ decoder_model_abs_path
+ ):
+ raise FileNotFoundError(
+ QCoreApplication.translate(
+ "Model",
+ "Could not download or initialize decoder of Segment Anything.",
+ )
+ )
+
+ # Load models
+ self.model = SegmentAnythingONNX(
+ encoder_model_abs_path, decoder_model_abs_path
+ )
+
+ # Mark for auto labeling
+ # points, rectangles
+ self.marks = []
+
+ # Cache for image embedding
+ self.cache_size = 10
+ self.preloaded_size = self.cache_size - 3
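+        # Preload fewer images than the cache holds so preloading cannot
+        # evict the embedding of the image currently being annotated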
+ self.image_embedding_cache = LRUCache(self.cache_size)
+
+ # Pre-inference worker
+ self.pre_inference_thread = None
+ self.pre_inference_worker = None
+ self.stop_inference = False
+
+ def set_auto_labeling_marks(self, marks):
+ """Set auto labeling marks"""
+ self.marks = marks
+
+ def post_process(self, masks):
+ """
+ Post process masks
+ """
+        # Binarize the mask logits at 0, then find external contours
+ masks[masks > 0.0] = 255
+ masks[masks <= 0.0] = 0
+ masks = masks.astype(np.uint8)
+ contours, _ = cv2.findContours(
+ masks, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
+ )
+
+ # Refine contours
+ approx_contours = []
+ for contour in contours:
+            # Simplify the contour (tolerance: 0.1% of its perimeter)
+ epsilon = 0.001 * cv2.arcLength(contour, True)
+ approx = cv2.approxPolyDP(contour, epsilon, True)
+ approx_contours.append(approx)
+
+        # Remove overly large contours (> 90% of image area)
+        if len(approx_contours) > 1:
+            image_size = masks.shape[0] * masks.shape[1]
+            areas = [cv2.contourArea(contour) for contour in approx_contours]
+            filtered_approx_contours = [
+                contour
+                for contour, area in zip(approx_contours, areas)
+                if area < image_size * 0.9
+            ]
+            approx_contours = filtered_approx_contours
+
+ # Remove small contours (area < 20% of average area)
+ if len(approx_contours) > 1:
+ areas = [cv2.contourArea(contour) for contour in approx_contours]
+ avg_area = np.mean(areas)
+
+ filtered_approx_contours = [
+ contour
+ for contour, area in zip(approx_contours, areas)
+ if area > avg_area * 0.2
+ ]
+ approx_contours = filtered_approx_contours
+
+ # Contours to shapes
+ shapes = []
+ if self.output_mode == "polygon":
+ for approx in approx_contours:
+                # Masks are decoded at the original image resolution,
+                # so the contour points need no rescaling
+                points = approx.reshape(-1, 2)
+                points = points.tolist()
+ if len(points) < 3:
+ continue
+ points.append(points[0])
+
+ # Create shape
+ shape = Shape(flags={})
+ for point in points:
+ point[0] = int(point[0])
+ point[1] = int(point[1])
+ shape.add_point(QtCore.QPointF(point[0], point[1]))
+ shape.shape_type = "polygon"
+ shape.closed = True
+ shape.fill_color = "#000000"
+ shape.line_color = "#000000"
+ shape.line_width = 1
+ shape.label = "AUTOLABEL_OBJECT"
+ shape.selected = False
+ shapes.append(shape)
+ elif self.output_mode == "rectangle":
+ x_min = 100000000
+ y_min = 100000000
+ x_max = 0
+ y_max = 0
+ for approx in approx_contours:
+                # Masks are decoded at the original image resolution,
+                # so the contour points need no rescaling
+                points = approx.reshape(-1, 2)
+                points = points.tolist()
+ if len(points) < 3:
+ continue
+
+ # Get min/max
+ for point in points:
+ x_min = min(x_min, point[0])
+ y_min = min(y_min, point[1])
+ x_max = max(x_max, point[0])
+ y_max = max(y_max, point[1])
+
+ # Create shape
+ shape = Shape(flags={})
+ shape.add_point(QtCore.QPointF(x_min, y_min))
+ shape.add_point(QtCore.QPointF(x_max, y_max))
+ shape.shape_type = "rectangle"
+ shape.closed = True
+ shape.fill_color = "#000000"
+ shape.line_color = "#000000"
+ shape.line_width = 1
+ shape.label = "AUTOLABEL_OBJECT"
+ shape.selected = False
+ shapes.append(shape)
+
+ return shapes
+
+ def predict_shapes(self, image, filename=None) -> AutoLabelingResult:
+ """
+ Predict shapes from image
+ """
+
+ if image is None or not self.marks:
+ return AutoLabelingResult([], replace=False)
+
+ shapes = []
+ try:
+ # Use cached image embedding if possible
+ cached_data = self.image_embedding_cache.get(filename)
+ if cached_data is not None:
+ image_embedding = cached_data
+ else:
+ cv_image = qt_img_to_rgb_cv_img(image, filename)
+ if self.stop_inference:
+ return AutoLabelingResult([], replace=False)
+ image_embedding = self.model.encode(cv_image)
+ self.image_embedding_cache.put(
+ filename,
+ image_embedding,
+ )
+ if self.stop_inference:
+ return AutoLabelingResult([], replace=False)
+ masks = self.model.predict_masks(image_embedding, self.marks)
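+            # Depending on the ONNX export, masks come back as (N, 1, H, W)
+            # or (1, H, W); take the first (and only) mask either way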
+ if len(masks.shape) == 4:
+ masks = masks[0][0]
+ else:
+ masks = masks[0]
+ shapes = self.post_process(masks)
+ except Exception as e: # noqa
+ logging.warning("Could not inference model")
+ logging.warning(e)
+ traceback.print_exc()
+ return AutoLabelingResult([], replace=False)
+
+ result = AutoLabelingResult(shapes, replace=False)
+ return result
+
+ def unload(self):
+ self.stop_inference = True
+ if self.pre_inference_thread:
+ self.pre_inference_thread.quit()
+
+ def preload_worker(self, files):
+ """
+ Preload next files, run inference and cache results
+ """
+ files = files[: self.preloaded_size]
+ for filename in files:
+ if self.image_embedding_cache.find(filename):
+ continue
+ image = self.load_image_from_filename(filename)
+ if image is None:
+ continue
+ if self.stop_inference:
+ return
+ cv_image = qt_img_to_rgb_cv_img(image)
+ image_embedding = self.model.encode(cv_image)
+ self.image_embedding_cache.put(
+ filename,
+ image_embedding,
+ )
+
+ def on_next_files_changed(self, next_files):
+ """
+ Handle next files changed. This function can preload next files
+ and run inference to save time for user.
+ """
+ if (
+ self.pre_inference_thread is None
+ or not self.pre_inference_thread.isRunning()
+ ):
+ self.pre_inference_thread = QThread()
+ self.pre_inference_worker = GenericWorker(
+ self.preload_worker, next_files
+ )
+ self.pre_inference_worker.finished.connect(
+ self.pre_inference_thread.quit
+ )
+ self.pre_inference_worker.moveToThread(self.pre_inference_thread)
+ self.pre_inference_thread.started.connect(
+ self.pre_inference_worker.run
+ )
+ self.pre_inference_thread.start()
diff --git a/anylabeling/services/auto_labeling/segment_anything.py b/anylabeling/services/auto_labeling/segment_anything.py
index 5eb1337c..1b5f0704 100644
--- a/anylabeling/services/auto_labeling/segment_anything.py
+++ b/anylabeling/services/auto_labeling/segment_anything.py
@@ -289,4 +289,4 @@ def on_next_files_changed(self, next_files):
self.pre_inference_thread.started.connect(
self.pre_inference_worker.run
)
- self.pre_inference_thread.start()
\ No newline at end of file
+        self.pre_inference_thread.start()
diff --git a/assets/examples/lane/lane_demo.json b/assets/examples/lane/lane_demo.json
new file mode 100644
index 00000000..6e13c5ad
--- /dev/null
+++ b/assets/examples/lane/lane_demo.json
@@ -0,0 +1,1401 @@
+{
+ "version": "0.2.0",
+ "flags": {},
+ "shapes": [
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 227.0,
+ 710.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 236.0,
+ 700.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 245.0,
+ 690.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 254.0,
+ 680.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 264.0,
+ 670.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 273.0,
+ 660.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 282.0,
+ 650.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 291.0,
+ 640.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 301.0,
+ 630.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 310.0,
+ 620.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 319.0,
+ 610.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 328.0,
+ 600.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 338.0,
+ 590.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 347.0,
+ 580.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 356.0,
+ 570.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 365.0,
+ 560.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 375.0,
+ 550.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 384.0,
+ 540.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 393.0,
+ 530.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 403.0,
+ 520.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 412.0,
+ 510.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 421.0,
+ 500.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 430.0,
+ 490.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 440.0,
+ 480.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 449.0,
+ 470.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 458.0,
+ 459.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 467.0,
+ 450.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 477.0,
+ 440.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 486.0,
+ 430.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 495.0,
+ 420.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 505.0,
+ 410.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 514.0,
+ 400.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 523.0,
+ 390.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 533.0,
+ 380.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 542.0,
+ 369.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 552.0,
+ 360.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 561.0,
+ 350.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 570.0,
+ 340.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 580.0,
+ 330.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 589.0,
+ 320.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 599.0,
+ 310.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 608.0,
+ 300.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 618.0,
+ 290.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 627.0,
+ 280.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 637.0,
+ 270.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane1",
+ "text": "",
+ "points": [
+ [
+ 646.0,
+ 260.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1252.0,
+ 420.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1218.0,
+ 410.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1185.0,
+ 400.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1151.0,
+ 390.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1117.0,
+ 380.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1084.0,
+ 369.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1050.0,
+ 360.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 1016.0,
+ 350.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 982.0,
+ 340.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 949.0,
+ 330.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 915.0,
+ 320.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 881.0,
+ 310.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 848.0,
+ 300.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 814.0,
+ 290.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 781.0,
+ 280.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane2",
+ "text": "",
+ "points": [
+ [
+ 748.0,
+ 270.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1268.0,
+ 700.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1255.0,
+ 690.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1242.0,
+ 680.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1228.0,
+ 670.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1215.0,
+ 660.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1202.0,
+ 650.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1188.0,
+ 640.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1175.0,
+ 630.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1162.0,
+ 620.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1148.0,
+ 610.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1135.0,
+ 600.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1122.0,
+ 590.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1108.0,
+ 580.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1095.0,
+ 570.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1082.0,
+ 560.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1068.0,
+ 550.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1055.0,
+ 540.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1042.0,
+ 530.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1028.0,
+ 520.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1015.0,
+ 510.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 1002.0,
+ 500.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 988.0,
+ 490.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 975.0,
+ 480.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 962.0,
+ 470.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 948.0,
+ 459.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 935.0,
+ 450.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 922.0,
+ 440.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 908.0,
+ 430.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 895.0,
+ 420.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 882.0,
+ 410.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 868.0,
+ 400.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 855.0,
+ 390.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 842.0,
+ 380.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 828.0,
+ 369.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 815.0,
+ 360.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 801.0,
+ 350.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 788.0,
+ 340.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 774.0,
+ 330.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 761.0,
+ 320.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 748.0,
+ 310.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 734.0,
+ 300.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 721.0,
+ 290.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 708.0,
+ 280.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 695.0,
+ 270.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ },
+ {
+ "label": "lane3",
+ "text": "",
+ "points": [
+ [
+ 682.0,
+ 260.0
+ ]
+ ],
+ "group_id": null,
+ "shape_type": "point",
+ "flags": {}
+ }
+ ],
+ "imagePath": "lane_demo.jpg",
+ "imageData": null,
+ "imageHeight": 720,
+ "imageWidth": 1280
+}
\ No newline at end of file
diff --git a/scripts/zip_models.py b/scripts/zip_models.py
index d5fabc4d..d03c4d44 100644
--- a/scripts/zip_models.py
+++ b/scripts/zip_models.py
@@ -29,7 +29,7 @@ def get_filename_from_url(url):
model_config = yaml.load(
open(model_config_path + config_file, "r"), Loader=yaml.FullLoader
)
- if model_config["type"] == "segment_anything":
+ if model_config["type"] in ["segment_anything", "sam_med2d"]:
download_links.append(model_config["encoder_model_path"])
download_links.append(model_config["decoder_model_path"])
else:
@@ -40,7 +40,7 @@ def get_filename_from_url(url):
# Save model config
# Rewrite model's urls
- if model_config["type"] == "segment_anything":
+ if model_config["type"] in ["segment_anything", "sam_med2d"]:
model_config["encoder_model_path"] = get_filename_from_url(
model_config["encoder_model_path"]
)