Skip to content

Commit

Permalink
Commit from GitHub Actions (Build Notebooks)
Browse files Browse the repository at this point in the history
  • Loading branch information
Tonks684 committed Aug 20, 2024
1 parent 97fe2a6 commit af5187a
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 60 deletions.
44 changes: 14 additions & 30 deletions exercise.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,6 @@
"id": "d13cbb1a",
"metadata": {
"cell_marker": "\"\"\"",
"lines_to_next_cell": 0,
"title": "<a ></a>"
},
"source": [
Expand All @@ -407,24 +406,11 @@
"cell_type": "code",
"execution_count": null,
"id": "99c8ab80",
"metadata": {
"lines_to_next_cell": 0
},
"metadata": {},
"outputs": [],
"source": [
"log_dir = f\"{top_dir}/model_tensorboard/{opt.name}/\"\n",
"%reload_ext tensorboard"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f10b40a0",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"%reload_ext tensorboard\n",
"%tensorboard --logdir $log_dir"
]
},
Expand Down Expand Up @@ -834,7 +820,6 @@
" device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
" cp_nuc_kwargs = {\n",
" \"diameter\": 65,\n",
" \"channels\": [0], \n",
" \"cellprob_threshold\": 0.0, \n",
" }\n",
" cellpose_model = models.CellposeModel(\n",
Expand Down Expand Up @@ -911,19 +896,20 @@
"outputs": [],
"source": [
"# Define function to visualize the segmentation results.\n",
"def visualise_results_and_masks(segmentation_results, test_segmentation_metrics: Tuple[dict], rows: int = 5, crop_size: int = None, crop_type: str = 'center'):\n",
"def visualise_results_and_masks(segmentation_results: Tuple[dict], segmentation_metrics: pd.DataFrame, rows: int = 5, crop_size: int = None, crop_type: str = 'center'):\n",
"\n",
" # Sample a subset of the segmentation results.\n",
" sample_indices = np.random.choice(len(phase_images),rows)\n",
" segmentation_metrics_subset = segmentation_metrics_subset.iloc[sample_indices,:]\n",
" print(sample_indices)\n",
" segmentation_metrics = segmentation_metrics.iloc[sample_indices,:]\n",
" segmentation_results = [segmentation_results[i] for i in sample_indices]\n",
" # Define the figure and axes.\n",
" fig, axes = plt.subplots(rows, 5, figsize=(rows*3, 15))\n",
"\n",
" # Visualize the segmentation results.\n",
" for i, idx in enumerate(test_segmentation_metrics):\n",
" result = segmentation_results[idx]\n",
" segmentation_metrics = segmentation_metrics_subset.iloc[i]\n",
" for i in range(len((segmentation_results))):\n",
" segmentation_metric = segmentation_metrics.iloc[i]\n",
" result = segmentation_results[i]\n",
" phase_image = result[\"phase_image\"]\n",
" target_stain = result[\"target_stain\"]\n",
" target_label = result[\"target_label\"]\n",
Expand All @@ -936,6 +922,7 @@
" target_label = crop(target_label, crop_size, crop_type)\n",
" pred_stain = crop(pred_stain, crop_size, crop_type)\n",
" pred_label = crop(pred_label, crop_size, crop_type)\n",
" \n",
" axes[i, 0].imshow(phase_image, cmap=\"gray\")\n",
" axes[i, 0].set_title(\"Phase\")\n",
" axes[i, 1].imshow(\n",
Expand All @@ -951,15 +938,15 @@
" axes[i, 3].set_title(\"Target Fluorescence Mask\")\n",
" axes[i, 4].imshow(pred_label, cmap=\"inferno\")\n",
" # Add Metric values to the title\n",
" axes[i, 4].set_title(f\"Virtual Stain Mask\\nAcc:{segmentation_metrics['accuracy']:.2f} Dice:{segmentation_metrics['dice']:.2f} Jaccard:{segmentation_metrics['jaccard']:.2f} MAP:{segmentation_metrics['mAP']:.2f}\")\n",
" axes[i, 4].set_title(f\"Virtual Stain Mask\\nAcc:{segmentation_metric['accuracy']:.2f} Dice:{segmentation_metric['dice']:.2f}\\nJaccard:{segmentation_metric['jaccard']:.2f} MAP:{segmentation_metric['mAP']:.2f}\")\n",
" # Turn off the axes.\n",
" for ax in axes.flatten():\n",
" ax.axis(\"off\")\n",
"\n",
" plt.tight_layout()\n",
" plt.show()\n",
" \n",
"visualise_results_and_masks(test_segmentation_metrics, crop_size=256, crop_type='center')"
"visualise_results_and_masks(segmentation_results,test_segmentation_metrics, crop_size=256, crop_type='center')"
]
},
{
Expand Down Expand Up @@ -1082,7 +1069,7 @@
"execution_count": null,
"id": "a94162a7",
"metadata": {
"lines_to_next_cell": 0
"lines_to_next_cell": 1
},
"outputs": [],
"source": [
Expand All @@ -1095,9 +1082,7 @@
"for index, sample_path in tqdm(enumerate(samples)):\n",
" sample_image = imread(sample_path)\n",
" # Append the images to the arrays.\n",
" sample_images[index] = sample_image\n",
"\n",
"# Plot the phase image, the target image, the variance of samples and 3 samples"
" sample_images[index] = sample_image"
]
},
{
Expand Down Expand Up @@ -1142,8 +1127,7 @@
" \n",
"## Checkpoint 5\n",
"\n",
"Congratulations! You have generated predictions from a pre-trained model and evaluated the performance of the model on unseen data. You have computed pixel-level metrics and instance-level metrics to evaluate the performance of the model. You may have also begun training your own Pix2PixHD GAN models with alternative hyperparameters.\n",
"\n",
"Congratulations! This is the end of the conditional generative modelling approach to image translation notebook. You have trained and examined the loss components of Pix2PixHD GAN. You have compared the results of a regression-based approach vs. generative modelling approach and explored the variability in virtual staining solutions. I hope you have enjoyed the learning experience!\n",
"</div>"
]
}
Expand Down
44 changes: 14 additions & 30 deletions solution.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,6 @@
"id": "9f60283b",
"metadata": {
"cell_marker": "\"\"\"",
"lines_to_next_cell": 0,
"title": "<a ></a>"
},
"source": [
Expand All @@ -407,24 +406,11 @@
"cell_type": "code",
"execution_count": null,
"id": "193d455c",
"metadata": {
"lines_to_next_cell": 0
},
"metadata": {},
"outputs": [],
"source": [
"log_dir = f\"{top_dir}/model_tensorboard/{opt.name}/\"\n",
"%reload_ext tensorboard"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "97a3ed01",
"metadata": {
"lines_to_next_cell": 0
},
"outputs": [],
"source": [
"%reload_ext tensorboard\n",
"%tensorboard --logdir $log_dir"
]
},
Expand Down Expand Up @@ -879,7 +865,6 @@
" device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
" cp_nuc_kwargs = {\n",
" \"diameter\": 65,\n",
" \"channels\": [0], \n",
" \"cellprob_threshold\": 0.0, \n",
" }\n",
" cellpose_model = models.CellposeModel(\n",
Expand Down Expand Up @@ -956,19 +941,20 @@
"outputs": [],
"source": [
"# Define function to visualize the segmentation results.\n",
"def visualise_results_and_masks(segmentation_results, test_segmentation_metrics: Tuple[dict], rows: int = 5, crop_size: int = None, crop_type: str = 'center'):\n",
"def visualise_results_and_masks(segmentation_results: Tuple[dict], segmentation_metrics: pd.DataFrame, rows: int = 5, crop_size: int = None, crop_type: str = 'center'):\n",
"\n",
" # Sample a subset of the segmentation results.\n",
" sample_indices = np.random.choice(len(phase_images),rows)\n",
" segmentation_metrics_subset = segmentation_metrics_subset.iloc[sample_indices,:]\n",
" print(sample_indices)\n",
" segmentation_metrics = segmentation_metrics.iloc[sample_indices,:]\n",
" segmentation_results = [segmentation_results[i] for i in sample_indices]\n",
" # Define the figure and axes.\n",
" fig, axes = plt.subplots(rows, 5, figsize=(rows*3, 15))\n",
"\n",
" # Visualize the segmentation results.\n",
" for i, idx in enumerate(test_segmentation_metrics):\n",
" result = segmentation_results[idx]\n",
" segmentation_metrics = segmentation_metrics_subset.iloc[i]\n",
" for i in range(len((segmentation_results))):\n",
" segmentation_metric = segmentation_metrics.iloc[i]\n",
" result = segmentation_results[i]\n",
" phase_image = result[\"phase_image\"]\n",
" target_stain = result[\"target_stain\"]\n",
" target_label = result[\"target_label\"]\n",
Expand All @@ -981,6 +967,7 @@
" target_label = crop(target_label, crop_size, crop_type)\n",
" pred_stain = crop(pred_stain, crop_size, crop_type)\n",
" pred_label = crop(pred_label, crop_size, crop_type)\n",
" \n",
" axes[i, 0].imshow(phase_image, cmap=\"gray\")\n",
" axes[i, 0].set_title(\"Phase\")\n",
" axes[i, 1].imshow(\n",
Expand All @@ -996,15 +983,15 @@
" axes[i, 3].set_title(\"Target Fluorescence Mask\")\n",
" axes[i, 4].imshow(pred_label, cmap=\"inferno\")\n",
" # Add Metric values to the title\n",
" axes[i, 4].set_title(f\"Virtual Stain Mask\\nAcc:{segmentation_metrics['accuracy']:.2f} Dice:{segmentation_metrics['dice']:.2f} Jaccard:{segmentation_metrics['jaccard']:.2f} MAP:{segmentation_metrics['mAP']:.2f}\")\n",
" axes[i, 4].set_title(f\"Virtual Stain Mask\\nAcc:{segmentation_metric['accuracy']:.2f} Dice:{segmentation_metric['dice']:.2f}\\nJaccard:{segmentation_metric['jaccard']:.2f} MAP:{segmentation_metric['mAP']:.2f}\")\n",
" # Turn off the axes.\n",
" for ax in axes.flatten():\n",
" ax.axis(\"off\")\n",
"\n",
" plt.tight_layout()\n",
" plt.show()\n",
" \n",
"visualise_results_and_masks(test_segmentation_metrics, crop_size=256, crop_type='center')"
"visualise_results_and_masks(segmentation_results,test_segmentation_metrics, crop_size=256, crop_type='center')"
]
},
{
Expand Down Expand Up @@ -1160,7 +1147,7 @@
"execution_count": null,
"id": "d359ea46",
"metadata": {
"lines_to_next_cell": 0
"lines_to_next_cell": 1
},
"outputs": [],
"source": [
Expand All @@ -1173,9 +1160,7 @@
"for index, sample_path in tqdm(enumerate(samples)):\n",
" sample_image = imread(sample_path)\n",
" # Append the images to the arrays.\n",
" sample_images[index] = sample_image\n",
"\n",
"# Plot the phase image, the target image, the variance of samples and 3 samples"
" sample_images[index] = sample_image"
]
},
{
Expand Down Expand Up @@ -1220,8 +1205,7 @@
" \n",
"## Checkpoint 5\n",
"\n",
"Congratulations! You have generated predictions from a pre-trained model and evaluated the performance of the model on unseen data. You have computed pixel-level metrics and instance-level metrics to evaluate the performance of the model. You may have also begun training your own Pix2PixHD GAN models with alternative hyperparameters.\n",
"\n",
"Congratulations! This is the end of the conditional generative modelling approach to image translation notebook. You have trained and examined the loss components of Pix2PixHD GAN. You have compared the results of a regression-based approach vs. generative modelling approach and explored the variability in virtual staining solutions. I hope you have enjoyed the learning experience!\n",
"</div>"
]
}
Expand Down

0 comments on commit af5187a

Please sign in to comment.