Skip to content

Commit

Permalink
Merge branch 'main' of github.com:Tonks684/dlmbl_material
Browse files Browse the repository at this point in the history
  • Loading branch information
Tonks684 committed Aug 15, 2024
2 parents 445c95e + b2e7c39 commit ef4fa68
Show file tree
Hide file tree
Showing 3 changed files with 100 additions and 44 deletions.
53 changes: 38 additions & 15 deletions exercise.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@
"import matplotlib.pyplot as plt\n",
"from cellpose import models\n",
"from typing import List, Tuple\n",
"from numpy.typing import ArrayLike\n",
"import warnings\n",
"warnings.filterwarnings('ignore')\n",
"\n",
Expand Down Expand Up @@ -589,14 +590,14 @@
"outputs": [],
"source": [
"# Define a function to crop the images so we can zoom in.\n",
"def crop(img, crop_size, type=None):\n",
"def crop(img, crop_size, loc='center'):\n",
" \"\"\"\n",
" Crop the input image.\n",
"\n",
" Parameters:\n",
" img (ndarray): The image to be cropped.\n",
" crop_size (int): The size of the crop.\n",
" type (str): The type of crop to perform. Can be 'center' or 'random'.\n",
" loc (str): The type of crop to perform. Can be 'center' or 'random'.\n",
"\n",
" Returns:\n",
" ndarray: The cropped image array.\n",
Expand All @@ -608,18 +609,18 @@
" max_y = height - crop_size\n",
" max_x = max_y\n",
"\n",
" if type == 'random':\n",
" if loc == 'random':\n",
" start_y = np.random.randint(0, max_y + 1)\n",
" start_x = np.random.randint(0, max_x + 1)\n",
" end_y = start_y + crop_size\n",
" end_x = start_x + crop_size\n",
" elif type == 'center':\n",
" elif loc == 'center':\n",
" start_x = (width - crop_size) // 2\n",
" start_y = (height - crop_size) // 2\n",
" end_y = height - start_y\n",
" end_x = width - start_x\n",
" else:\n",
" raise ValueError(f'Unknown crop type {type}')\n",
" raise ValueError(f'Unknown crop type {loc}')\n",
"\n",
" # Crop array using slicing\n",
" crop_array = img[start_x:end_x, start_y:end_y]\n",
Expand Down Expand Up @@ -670,7 +671,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "abb07bea",
"id": "baf66869",
"metadata": {
"lines_to_next_cell": 0
},
Expand All @@ -683,12 +684,12 @@
"\n",
"# Create a dataframe to store the pixel-level metrics.\n",
"test_pixel_metrics = pd.DataFrame(\n",
" columns=[\"model\", \"fov\",\"pearson_nuc\", \"SSIM_nuc\", \"psnr_nuc\"]\n",
" columns=[\"model\", \"fov\",\"pearson_nuc\", \"ssim_nuc\", \"psnr_nuc\"]\n",
")\n",
"\n",
"# Compute the pixel-level metrics.\n",
"for i, (target_stain, predicted_stain) in tqdm(enumerate(zip(target_stains, virtual_stains))):\n",
" fov = virtual_stain_paths[i].splt(\"/\")[-1].split(\".\")[0]\n",
" fov = str(virtual_stain_paths[i]).split(\"/\")[-1].split(\".\")[0]\n",
" minmax_norm_target = min_max_scale(target_stain)\n",
" minmax_norm_predicted = min_max_scale(predicted_stain)\n",
" \n",
Expand All @@ -708,19 +709,41 @@
" test_pixel_metrics.loc[len(test_pixel_metrics)] = {\n",
" \"model\": \"pix2pixHD\",\n",
" \"fov\":fov,\n",
" \"Pearson_nuc\": pearson_nuc,\n",
" \"SSIM_nuc\": ssim_nuc,\n",
" \"PSNR_nuc\": psnr_nuc, \n",
" \"pearson_nuc\": pearson_nuc,\n",
" \"ssim_nuc\": ssim_nuc,\n",
" \"psnr_nuc\": psnr_nuc, \n",
" }\n",
" \n",
"test_pixel_metrics.boxplot(\n",
" column=[\"Pearson_nuc\", \"SSIM_nuc\"],\n",
" column=[\"pearson_nuc\", \"ssim_nuc\"],\n",
" rot=30,\n",
")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "721e9b16",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"test_pixel_metrics.boxplot(\n",
" column=[\"psnr_nuc\"],\n",
" rot=30,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abb07bea",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"test_pixel_metrics.head()"
]
},
Expand Down Expand Up @@ -796,7 +819,7 @@
"segmentation_results = ()\n",
"\n",
"for i, (target_stain, predicted_stain) in tqdm(enumerate(zip(target_stains, virtual_stains))):\n",
" fov = virtual_stain_paths[i].splt(\"/\")[-1].split(\".\")[0]\n",
"    fov = str(virtual_stain_paths[i]).split(\"/\")[-1].split(\".\")[0]\n",
" minmax_norm_target = min_max_scale(target_stain)\n",
" minmax_norm_predicted = min_max_scale(predicted_stain)\n",
" # Compute the segmentation masks.\n",
Expand Down
9 changes: 7 additions & 2 deletions setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,13 @@ $ENV_PATH/bin/pip install "cellpose"
$ENV_PATH/bin/pip uninstall -y "opencv-python"
$ENV_PATH/bin/pip install "opencv-python==4.7.0.72"
$ENV_PATH/bin/pip install "ipykernel"
# Add conda environment to jupyter notebook
$ENV_PATH/bin/python -m ipykernel install --user --name 06_image_translation --display-name "06_image_translation"
$ENV_PATH/bin/pip install "pandas"
$ENV_PATH/bin/pip install "pillow"
$ENV_PATH/bin/pip install "matplotlib"
$ENV_PATH/bin/pip install -U "scikit-image"
$ENV_PATH/bin/pip install "tensorboard"
$ENV_PATH/bin/pip install "viscy"
$ENV_PATH/bin/pip install "torchmetrics[detection]"
# Download the weights and pretrained tensorboards
mkdir -p ~/data/06_image_translation/part2/model_weights
mkdir -p ~/data/06_image_translation/part2/model_tensorboard
Expand Down
82 changes: 55 additions & 27 deletions solution.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@
"import matplotlib.pyplot as plt\n",
"from cellpose import models\n",
"from typing import List, Tuple\n",
"from numpy.typing import ArrayLike\n",
"import warnings\n",
"warnings.filterwarnings('ignore')\n",
"\n",
Expand Down Expand Up @@ -589,14 +590,14 @@
"outputs": [],
"source": [
"# Define a function to crop the images so we can zoom in.\n",
"def crop(img, crop_size, type=None):\n",
"def crop(img, crop_size, loc='center'):\n",
" \"\"\"\n",
" Crop the input image.\n",
"\n",
" Parameters:\n",
" img (ndarray): The image to be cropped.\n",
" crop_size (int): The size of the crop.\n",
" type (str): The type of crop to perform. Can be 'center' or 'random'.\n",
" loc (str): The type of crop to perform. Can be 'center' or 'random'.\n",
"\n",
" Returns:\n",
" ndarray: The cropped image array.\n",
Expand All @@ -608,18 +609,18 @@
" max_y = height - crop_size\n",
" max_x = max_y\n",
"\n",
" if type == 'random':\n",
" if loc == 'random':\n",
" start_y = np.random.randint(0, max_y + 1)\n",
" start_x = np.random.randint(0, max_x + 1)\n",
" end_y = start_y + crop_size\n",
" end_x = start_x + crop_size\n",
" elif type == 'center':\n",
" elif loc == 'center':\n",
" start_x = (width - crop_size) // 2\n",
" start_y = (height - crop_size) // 2\n",
" end_y = height - start_y\n",
" end_x = width - start_x\n",
" else:\n",
" raise ValueError(f'Unknown crop type {type}')\n",
" raise ValueError(f'Unknown crop type {loc}')\n",
"\n",
" # Crop array using slicing\n",
" crop_array = img[start_x:end_x, start_y:end_y]\n",
Expand All @@ -629,7 +630,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "91890ccf",
"id": "cc247ac9",
"metadata": {
"lines_to_next_cell": 0,
"tags": [
Expand All @@ -643,7 +644,7 @@
"######## Solution ########\n",
"##########################\n",
"\n",
"def visualise_results(phase_images, target_stains, virtual_stains, crop_size=None, type='center'):\n",
"def visualise_results(phase_images, target_stains, virtual_stains, crop_size=None, loc='center'):\n",
" \"\"\"\n",
" Visualizes the results of image processing by displaying the phase images, target stains, and virtual stains.\n",
" Parameters:\n",
Expand All @@ -659,23 +660,28 @@
" sample_indices = np.random.choice(len(phase_images), 5)\n",
" for index,sample in enumerate(sample_indices):\n",
" if crop_size:\n",
" phase_images[index] = crop(phase_images[index], crop_size, type)\n",
" target_stains[index] = crop(target_stains[index], crop_size, type)\n",
" virtual_stains[index] = crop(virtual_stains[index], crop_size, type)\n",
" axes[index, 0].imshow(phase_images[index], cmap=\"gray\")\n",
" phase_image = crop(phase_images[index], crop_size, loc)\n",
" target_stain = crop(target_stains[index], crop_size, loc)\n",
" virtual_stain = crop(virtual_stains[index], crop_size, loc)\n",
" else:\n",
" phase_image = phase_images[index]\n",
" target_stain = target_stains[index]\n",
" virtual_stain = virtual_stains[index] \n",
"\n",
" axes[index, 0].imshow(phase_image, cmap=\"gray\")\n",
" axes[index, 0].set_title(\"Phase\")\n",
" axes[index, 1].imshow(\n",
" target_stains[index],\n",
" target_stain,\n",
" cmap=\"gray\",\n",
" vmin=np.percentile(target_stains[index], 1),\n",
" vmax=np.percentile(target_stains[index], 99),\n",
" vmin=np.percentile(target_stain, 1),\n",
" vmax=np.percentile(target_stain, 99),\n",
" )\n",
" axes[index, 1].set_title(\"Target Fluorescence \")\n",
" axes[index, 2].imshow(\n",
" virtual_stains[index],\n",
" virtual_stain,\n",
" cmap=\"gray\",\n",
" vmin=np.percentile(target_stains[index], 1),\n",
" vmax=np.percentile(target_stains[index], 99),\n",
" vmin=np.percentile(target_stain, 1),\n",
" vmax=np.percentile(target_stain, 99),\n",
" )\n",
" axes[index, 2].set_title(\"Virtual Stain\")\n",
" for ax in axes.flatten():\n",
Expand Down Expand Up @@ -710,7 +716,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "af1a13e0",
"id": "7ea32c4c",
"metadata": {
"lines_to_next_cell": 0
},
Expand All @@ -723,12 +729,12 @@
"\n",
"# Create a dataframe to store the pixel-level metrics.\n",
"test_pixel_metrics = pd.DataFrame(\n",
" columns=[\"model\", \"fov\",\"pearson_nuc\", \"SSIM_nuc\", \"psnr_nuc\"]\n",
" columns=[\"model\", \"fov\",\"pearson_nuc\", \"ssim_nuc\", \"psnr_nuc\"]\n",
")\n",
"\n",
"# Compute the pixel-level metrics.\n",
"for i, (target_stain, predicted_stain) in tqdm(enumerate(zip(target_stains, virtual_stains))):\n",
" fov = virtual_stain_paths[i].splt(\"/\")[-1].split(\".\")[0]\n",
" fov = str(virtual_stain_paths[i]).split(\"/\")[-1].split(\".\")[0]\n",
" minmax_norm_target = min_max_scale(target_stain)\n",
" minmax_norm_predicted = min_max_scale(predicted_stain)\n",
" \n",
Expand All @@ -748,19 +754,41 @@
" test_pixel_metrics.loc[len(test_pixel_metrics)] = {\n",
" \"model\": \"pix2pixHD\",\n",
" \"fov\":fov,\n",
" \"Pearson_nuc\": pearson_nuc,\n",
" \"SSIM_nuc\": ssim_nuc,\n",
" \"PSNR_nuc\": psnr_nuc, \n",
" \"pearson_nuc\": pearson_nuc,\n",
" \"ssim_nuc\": ssim_nuc,\n",
" \"psnr_nuc\": psnr_nuc, \n",
" }\n",
" \n",
"test_pixel_metrics.boxplot(\n",
" column=[\"Pearson_nuc\", \"SSIM_nuc\"],\n",
" column=[\"pearson_nuc\", \"ssim_nuc\"],\n",
" rot=30,\n",
")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e09b3843",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"test_pixel_metrics.boxplot(\n",
" column=[\"psnr_nuc\"],\n",
" rot=30,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af1a13e0",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"test_pixel_metrics.head()"
]
},
Expand Down Expand Up @@ -836,7 +864,7 @@
"segmentation_results = ()\n",
"\n",
"for i, (target_stain, predicted_stain) in tqdm(enumerate(zip(target_stains, virtual_stains))):\n",
" fov = virtual_stain_paths[i].splt(\"/\")[-1].split(\".\")[0]\n",
"    fov = str(virtual_stain_paths[i]).split(\"/\")[-1].split(\".\")[0]\n",
" minmax_norm_target = min_max_scale(target_stain)\n",
" minmax_norm_predicted = min_max_scale(predicted_stain)\n",
" # Compute the segmentation masks.\n",
Expand Down

0 comments on commit ef4fa68

Please sign in to comment.