From 3c891ec76d26e437b7b26387589fb9dc40f5e2f8 Mon Sep 17 00:00:00 2001
From: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Date: Sat, 7 Sep 2024 19:05:05 +0800
Subject: [PATCH] Update google link to use shared drive (#1819)

Update Google Drive links to use the shared NVIDIA asset host (developer.download.nvidia.com).

### Checks
- [ ] Avoid including large-size files in the PR.
- [ ] Clean up long text outputs from code cells in the notebook.
- [ ] For security purposes, please check the contents and remove any
sensitive info such as user names and private keys.
- [ ] Ensure (1) hyperlinks and markdown anchors are working, (2) relative
paths are used for tutorial repo files, and (3) figures and graphs are placed
in the `./figure` folder
- [ ] Notebook runs automatically with `./runner.sh -t <path to .ipynb file>`

---------

Signed-off-by: YunLiu <55491388+KumoLiu@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .../densenet_training_array.ipynb             |  2 +-
 3d_regression/densenet_training_array.ipynb   |  2 +-
 .../swin_unetr_brats21_segmentation_3d.ipynb  |  4 ++--
 .../swin_unetr_btcv_segmentation_3d.ipynb     |  2 +-
 .../vista3d/vista3d_spleen_finetune.ipynb     |  2 +-
 ...reprocess_to_build_detection_dataset.ipynb |  2 +-
 deployment/ray/mednist_classifier_ray.ipynb   |  2 +-
 detection/README.md                           |  2 +-
 .../breast_density_challenge/data/README.md   |  2 +-
 .../run_docker.md                             |  2 +-
 generation/maisi/data/README.md               |  2 +-
 .../maisi/maisi_inference_tutorial.ipynb      | 22 ++++++++++++-------
 generation/maisi/scripts/inference.py         | 16 +++++++-------
 .../benchmark_global_mutual_information.ipynb |  2 +-
 modules/engines/gan_training.py               |  2 +-
 modules/public_datasets.ipynb                 |  2 +-
 modules/resample_benchmark.ipynb              |  2 +-
 ..._wholebody_totalSegmentator_3dslicer.ipynb |  2 +-
 .../README.md                                 |  2 +-
 .../multiple_instance_learning/README.md      |  2 +-
 .../panda_mil_train_evaluate_pytorch_gpu.py   |  2 +-
 pathology/tumor_detection/README.MD           |  4 ++--
 .../ignite/profiling_camelyon_pipeline.ipynb  |  8 +++----
 .../pathology/profiling_train_base_nvtx.md    |  4 ++--
 vista_2d/vista_2d_tutorial_monai.ipynb        |  2 +-
 25 files changed, 51 insertions(+), 45 deletions(-)

diff --git a/3d_classification/densenet_training_array.ipynb b/3d_classification/densenet_training_array.ipynb
index 3eff90ace4..2863cd92a0 100644
--- a/3d_classification/densenet_training_array.ipynb
+++ b/3d_classification/densenet_training_array.ipynb
@@ -200,7 +200,7 @@
    ],
    "source": [
     "if not os.path.isfile(images[0]):\n",
-    "    resource = \"https://drive.google.com/file/d/1f5odq9smadgeJmDeyEy_UOjEtE_pkKc0/view?usp=sharing\"\n",
+    "    resource = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/IXI-T1.tar\"\n",
     "    md5 = \"34901a0593b41dd19c1a1f746eac2d58\"\n",
     "\n",
     "    dataset_dir = os.path.join(root_dir, \"ixi\")\n",
diff --git a/3d_regression/densenet_training_array.ipynb b/3d_regression/densenet_training_array.ipynb
index 0b330129ad..b9bc311060 100644
--- a/3d_regression/densenet_training_array.ipynb
+++ b/3d_regression/densenet_training_array.ipynb
@@ -205,7 +205,7 @@
    "outputs": [],
    "source": [
     "if not os.path.isfile(images[0]):\n",
-    "    resource = \"https://drive.google.com/file/d/1f5odq9smadgeJmDeyEy_UOjEtE_pkKc0/view?usp=sharing\"\n",
+    "    resource = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/IXI-T1.tar\"\n",
     "    md5 = \"34901a0593b41dd19c1a1f746eac2d58\"\n",
     "\n",
     "    dataset_dir = os.path.join(root_dir, \"ixi\")\n",
diff --git a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
index fd21efc9f7..65554f91a1 100644
--- a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
+++ b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb
@@ -45,7 +45,7 @@
     "\n",
     "https://www.synapse.org/#!Synapse:syn27046444/wiki/616992\n",
     "\n",
-    "The JSON file containing training and validation sets (internal split) needs to be downloaded from this [link](https://drive.google.com/file/d/1i-BXYe-wZ8R9Vp3GXoajGyqaJ65Jybg1/view?usp=sharing) and placed in the same folder as the dataset. As discussed in the following, this tutorial uses fold 1 for training a Swin UNETR model on the BraTS 21 challenge.\n",
+    "The JSON file containing training and validation sets (internal split) needs to be downloaded from this [link](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/brats21_folds.json) and placed in the same folder as the dataset. As discussed in the following, this tutorial uses fold 1 for training a Swin UNETR model on the BraTS 21 challenge.\n",
     "\n",
     "### Tumor Characteristics\n",
     "\n",
@@ -114,7 +114,7 @@
     "  \"TrainingData/BraTS2021_01146/BraTS2021_01146_flair.nii.gz\"\n",
     "  \n",
     "\n",
-    "- Download the json file from this [link](https://drive.google.com/file/d/1i-BXYe-wZ8R9Vp3GXoajGyqaJ65Jybg1/view?usp=sharing) and placed in the same folder as the dataset.\n"
+    "- Download the json file from this [link](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/brats21_folds.json) and placed in the same folder as the dataset.\n"
    ]
   },
   {
diff --git a/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb b/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb
index 0d353f3404..18550d5521 100644
--- a/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb
+++ b/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb
@@ -331,7 +331,7 @@
    "outputs": [],
    "source": [
     "# uncomment this command to download the JSON file directly\n",
-    "# wget -O data/dataset_0.json 'https://drive.google.com/uc?export=download&id=1qcGh41p-rI3H_sQ0JwOAhNiQSXriQqGi'"
+    "# wget -O data/dataset_0.json 'https://developer.download.nvidia.com/assets/Clara/monai/tutorials/swin_unetr_btcv_dataset_0.json'"
    ]
   },
   {
diff --git a/3d_segmentation/vista3d/vista3d_spleen_finetune.ipynb b/3d_segmentation/vista3d/vista3d_spleen_finetune.ipynb
index a5abb0172e..0c6d7c5e59 100644
--- a/3d_segmentation/vista3d/vista3d_spleen_finetune.ipynb
+++ b/3d_segmentation/vista3d/vista3d_spleen_finetune.ipynb
@@ -191,7 +191,7 @@
     }
    ],
    "source": [
-    "resource = \"https://drive.google.com/file/d/1Sbe6GjlgH-GIcXolZzUiwgqR4DBYNLQ3/view?usp=drive_link\"\n",
+    "resource = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_vista3d.pt\"\n",
     "if not os.path.exists(os.path.join(root_dir, \"model.pt\")):\n",
     "    download_url(url=resource, filepath=os.path.join(root_dir, \"model.pt\"))\n",
     "if os.path.exists(os.path.join(root_dir, \"model.pt\")):\n",
diff --git a/competitions/MICCAI/surgtoolloc/preprocess_to_build_detection_dataset.ipynb b/competitions/MICCAI/surgtoolloc/preprocess_to_build_detection_dataset.ipynb
index 3efc8e849a..772778b111 100644
--- a/competitions/MICCAI/surgtoolloc/preprocess_to_build_detection_dataset.ipynb
+++ b/competitions/MICCAI/surgtoolloc/preprocess_to_build_detection_dataset.ipynb
@@ -71,7 +71,7 @@
     "## Load useful data\n",
     "\n",
     "As described in `readme.md`, we manually labeled 1126 frames in order to build the detection model.\n",
-    "Please download the manually labeled bounding boxes from [google drive](https://drive.google.com/file/d/1iO4bXTGdhRLIoxIKS6P_nNAgI_1Fp_Vg/view?usp=sharing), the uncompressed folder `labels` is saved into `label_14_tools_yolo_640_blur/`."
+    "Please download the manually labeled bounding boxes from [google drive](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/1126_frame_labels.zip), the uncompressed folder `labels` is saved into `label_14_tools_yolo_640_blur/`."
    ]
   },
   {
diff --git a/deployment/ray/mednist_classifier_ray.ipynb b/deployment/ray/mednist_classifier_ray.ipynb
index 0ddd0696f5..c487e8f113 100644
--- a/deployment/ray/mednist_classifier_ray.ipynb
+++ b/deployment/ray/mednist_classifier_ray.ipynb
@@ -122,7 +122,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "resource = \"https://drive.google.com/uc?id=1zKRi5FrwEES_J-AUkM7iBJwc__jy6ct6\"\n",
+    "resource = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/deployment/classifier.zip\"\n",
     "dst = os.path.join(\"..\", \"bentoml\", \"classifier.zip\")\n",
     "if not os.path.exists(dst):\n",
     "    download_url(resource, dst)"
diff --git a/detection/README.md b/detection/README.md
index 716b48ddbc..39204fff73 100644
--- a/detection/README.md
+++ b/detection/README.md
@@ -46,7 +46,7 @@ Then run the following command and go directly to Sec. 3.2.
 python3 luna16_prepare_env_files.py
 ```
 
-Alternatively, you can download the original data and resample them by yourself with the following steps. Users can either download 1) mhd/raw data from [LUNA16](https://luna16.grand-challenge.org/Home/) or its [copy](https://drive.google.com/drive/folders/1-enN4eNEnKmjltevKg3W2V-Aj0nriQWE?usp=share_link), or 2) DICOM data from [LIDC-IDRI](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=1966254) with [NBIA Data Retriever](https://wiki.cancerimagingarchive.net/display/NBIA/Downloading+TCIA+Images).
+Alternatively, you can download the original data and resample them yourself with the following steps. You can either download 1) mhd/raw data from [LUNA16](https://luna16.grand-challenge.org/Home/), or 2) DICOM data from [LIDC-IDRI](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=1966254) with [NBIA Data Retriever](https://wiki.cancerimagingarchive.net/display/NBIA/Downloading+TCIA+Images).
 
 The raw CT images in LUNA16 have various voxel sizes. The first step is to resample them to the same voxel size, which is defined in the value of "spacing" in [./config/config_train_luna16_16g.json](./config/config_train_luna16_16g.json).
 
diff --git a/federated_learning/breast_density_challenge/data/README.md b/federated_learning/breast_density_challenge/data/README.md
index f06a23217e..b9bb359179 100644
--- a/federated_learning/breast_density_challenge/data/README.md
+++ b/federated_learning/breast_density_challenge/data/README.md
@@ -1,6 +1,6 @@
 ## Example breast density data
 
-Download example data from https://drive.google.com/file/d/1Fd9GLUIzbZrl4FrzI3Huzul__C8wwzyx/view?usp=sharing.
+Download example data from https://developer.download.nvidia.com/assets/Clara/monai/tutorials/fl/preprocessed.zip.
 Extract here.
 
 ## Data source
diff --git a/federated_learning/openfl/openfl_mednist_2d_registration/run_docker.md b/federated_learning/openfl/openfl_mednist_2d_registration/run_docker.md
index 26a751698b..1d33c1204d 100644
--- a/federated_learning/openfl/openfl_mednist_2d_registration/run_docker.md
+++ b/federated_learning/openfl/openfl_mednist_2d_registration/run_docker.md
@@ -118,7 +118,7 @@ $ fx envoy start --shard-name env_two --disable-tls --envoy-config-path envoy_co
 ```
 [13:48:42] INFO     🧿 Starting the Envoy.                                                                                                            envoy.py:53
 Downloading...
-From: https://drive.google.com/uc?id=1QsnnkvZyJPcbRoV_ArW8SnE1OTuoVbKE
+From: https://developer.download.nvidia.com/assets/Clara/monai/tutorials/MedNIST.tar.gz
 To: /tmp/tmpd60wcnn8/MedNIST.tar.gz
 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 61.8M/61.8M [00:04<00:00, 13.8MB/s]
 2022-07-22 13:48:48,735 - INFO - Downloaded: MedNIST.tar.gz
diff --git a/generation/maisi/data/README.md b/generation/maisi/data/README.md
index c6848f0caf..0a745d2527 100644
--- a/generation/maisi/data/README.md
+++ b/generation/maisi/data/README.md
@@ -62,7 +62,7 @@ The table below provides a summary of the number of volumes for each dataset.
 
 #### 3.1 Example preprocessed dataset
 
-We provide the preprocessed subset of [C4KC-KiTS](https://www.cancerimagingarchive.net/collection/c4kc-kits/) dataset used in the finetuning config `environment_maisi_controlnet_train.json`. The dataset and corresponding JSON data list can be downloaded from [this link](https://drive.google.com/drive/folders/1iMStdYxcl26dEXgJEXOjkWvx-I2fYZ2u?usp=sharing) and should be saved in `maisi/dataset/` folder.
+We provide a preprocessed subset of the [C4KC-KiTS](https://www.cancerimagingarchive.net/collection/c4kc-kits/) dataset used in the finetuning config `environment_maisi_controlnet_train.json`. The [dataset](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_C4KC-KiTS_subset.zip) and the [corresponding JSON data list](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_C4KC-KiTS_subset.json) can be downloaded and should be saved in the `maisi/dataset/` folder.
 
 The structure of example folder in the preprocessed dataset is:
 
diff --git a/generation/maisi/maisi_inference_tutorial.ipynb b/generation/maisi/maisi_inference_tutorial.ipynb
index ce03d9b1da..6b7ac286b4 100644
--- a/generation/maisi/maisi_inference_tutorial.ipynb
+++ b/generation/maisi/maisi_inference_tutorial.ipynb
@@ -157,35 +157,41 @@
     "files = [\n",
     "    {\n",
     "        \"path\": \"models/autoencoder_epoch273.pt\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1Ojw25lFO8QbHkxazdK4CgZTyp3GFNZGz/view?usp=sharing\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials\"\n",
+    "        \"/model_zoo/model_maisi_autoencoder_epoch273_alternative.pt\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"models/input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1.pt\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1lklNv4MTdI_9bwFRMd98QQ7JLerR5gC_/view?usp=drive_link\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo\"\n",
+    "        \"/model_maisi_input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1_alternative.pt\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"models/controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current.pt\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1mLYeqeZ819_WpZPlAInhcWuCIHgn3QNT/view?usp=drive_link\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo\"\n",
+    "        \"/model_maisi_controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current_alternative.pt\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"models/mask_generation_autoencoder.pt\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/19JnX-C6QAg4RfghTwpPnj4KEWhtawpCy/view?usp=drive_link\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\" \"/tutorials/mask_generation_autoencoder.pt\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"models/mask_generation_diffusion_unet.pt\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1yOQvlhXFGY1ZYavADM3N34vgg5AEitda/view?usp=drive_link\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n",
+    "        \"/tutorials/model_zoo/model_maisi_mask_generation_diffusion_unet_v2.pt\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"configs/candidate_masks_flexible_size_and_spacing_3000.json\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1yMkH-lrAsn2YUGoTuVKNMpicziUmU-1J/view?usp=sharing\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n",
+    "        \"/tutorials/candidate_masks_flexible_size_and_spacing_3000.json\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"configs/all_anatomy_size_condtions.json\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/1AJyt1DSoUd2x2AOQOgM7IxeSyo4MXNX0/view?usp=sharing\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/all_anatomy_size_condtions.json\",\n",
     "    },\n",
     "    {\n",
     "        \"path\": \"datasets/all_masks_flexible_size_and_spacing_3000.zip\",\n",
-    "        \"url\": \"https://drive.google.com/file/d/16MKsDKkHvDyF2lEir4dzlxwex_GHStUf/view?usp=sharing\",\n",
+    "        \"url\": \"https://developer.download.nvidia.com/assets/Clara/monai\"\n",
+    "        \"/tutorials/model_zoo/model_maisi_all_masks_flexible_size_and_spacing_3000.zip\",\n",
     "    },\n",
     "]\n",
     "\n",
diff --git a/generation/maisi/scripts/inference.py b/generation/maisi/scripts/inference.py
index cd72073bf9..ffd96a5e8b 100644
--- a/generation/maisi/scripts/inference.py
+++ b/generation/maisi/scripts/inference.py
@@ -76,35 +76,35 @@ def main():
     files = [
         {
             "path": "models/autoencoder_epoch273.pt",
-            "url": "https://drive.google.com/file/d/1Ojw25lFO8QbHkxazdK4CgZTyp3GFNZGz/view?usp=sharing",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_autoencoder_epoch273_alternative.pt",
         },
         {
             "path": "models/input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1.pt",
-            "url": "https://drive.google.com/file/d/1lklNv4MTdI_9bwFRMd98QQ7JLerR5gC_/view?usp=drive_link",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_input_unet3d_data-all_steps1000size512ddpm_random_current_inputx_v1_alternative.pt",
         },
         {
             "path": "models/controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current.pt",
-            "url": "https://drive.google.com/file/d/1mLYeqeZ819_WpZPlAInhcWuCIHgn3QNT/view?usp=drive_link",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_controlnet-20datasets-e20wl100fold0bc_noi_dia_fsize_current_alternative.pt",
         },
         {
             "path": "models/mask_generation_autoencoder.pt",
-            "url": "https://drive.google.com/file/d/19JnX-C6QAg4RfghTwpPnj4KEWhtawpCy/view?usp=drive_link",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/mask_generation_autoencoder.pt",
         },
         {
             "path": "models/mask_generation_diffusion_unet.pt",
-            "url": "https://drive.google.com/file/d/1yOQvlhXFGY1ZYavADM3N34vgg5AEitda/view?usp=drive_link",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_mask_generation_diffusion_unet_v2.pt",
         },
         {
             "path": "configs/candidate_masks_flexible_size_and_spacing_3000.json",
-            "url": "https://drive.google.com/file/d/1yMkH-lrAsn2YUGoTuVKNMpicziUmU-1J/view?usp=sharing",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/candidate_masks_flexible_size_and_spacing_3000.json",
         },
         {
             "path": "configs/all_anatomy_size_condtions.json",
-            "url": "https://drive.google.com/file/d/1AJyt1DSoUd2x2AOQOgM7IxeSyo4MXNX0/view?usp=sharing",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/all_anatomy_size_condtions.json",
         },
         {
             "path": "datasets/all_masks_flexible_size_and_spacing_3000.zip",
-            "url": "https://drive.google.com/file/d/16MKsDKkHvDyF2lEir4dzlxwex_GHStUf/view?usp=sharing",
+            "url": "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/model_zoo/model_maisi_all_masks_flexible_size_and_spacing_3000.zip",
         },
     ]
 
diff --git a/modules/benchmark_global_mutual_information.ipynb b/modules/benchmark_global_mutual_information.ipynb
index dee9ea91c9..b5bdf1bb96 100644
--- a/modules/benchmark_global_mutual_information.ipynb
+++ b/modules/benchmark_global_mutual_information.ipynb
@@ -149,7 +149,7 @@
     "    os.makedirs(directory, exist_ok=True)\n",
     "root_dir = tempfile.mkdtemp() if directory is None else directory\n",
     "print(f\"root dir is: {root_dir}\")\n",
-    "file_url = \"https://drive.google.com/uc?id=17tsDLvG_GZm7a4fCVMCv-KyDx0hqq1ji\"\n",
+    "file_url = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/Prostate_T2W_AX_1.nii\"\n",
     "file_path = f\"{root_dir}/Prostate_T2W_AX_1.nii\"\n",
     "download_url(file_url, file_path)"
    ]
diff --git a/modules/engines/gan_training.py b/modules/engines/gan_training.py
index ba116fcf24..c0d941b90f 100644
--- a/modules/engines/gan_training.py
+++ b/modules/engines/gan_training.py
@@ -14,7 +14,7 @@
     Sample script using MONAI to train a GAN to synthesize images from a latent code.
 
 ## Get the dataset
-    MedNIST.tar.gz link: https://drive.google.com/uc?id=1QsnnkvZyJPcbRoV_ArW8SnE1OTuoVbKE
+    MedNIST.tar.gz link: https://developer.download.nvidia.com/assets/Clara/monai/tutorials/MedNIST.tar.gz
     Extract tarball and set input_dir variable. GAN script trains using hand CT scan jpg images.
 
     Dataset information available in MedNIST Tutorial
diff --git a/modules/public_datasets.ipynb b/modules/public_datasets.ipynb
index d09219eae1..25ecf5f59d 100644
--- a/modules/public_datasets.ipynb
+++ b/modules/public_datasets.ipynb
@@ -595,7 +595,7 @@
    "outputs": [],
    "source": [
     "class IXIDataset(Randomizable, CacheDataset):\n",
-    "    resource = \"https://drive.google.com/file/d/1f5odq9smadgeJmDeyEy_UOjEtE_pkKc0/view?usp=sharing\"\n",
+    "    resource = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/IXI-T1.tar\"\n",
     "    md5 = \"34901a0593b41dd19c1a1f746eac2d58\"\n",
     "\n",
     "    def __init__(\n",
diff --git a/modules/resample_benchmark.ipynb b/modules/resample_benchmark.ipynb
index 2d3fcc6940..78f84997c8 100644
--- a/modules/resample_benchmark.ipynb
+++ b/modules/resample_benchmark.ipynb
@@ -174,7 +174,7 @@
      "text": [
       "\n",
       "Downloading...\n",
-      "From: https://drive.google.com/uc?id=17tsDLvG_GZm7a4fCVMCv-KyDx0hqq1ji\n",
+      "From: https://developer.download.nvidia.com/assets/Clara/monai/tutorials/Prostate_T2W_AX_1.nii\n",
       "To: /tmp/tmp2euy74rf/mri.nii\n",
       "100%|██████████| 12.1M/12.1M [00:00<00:00, 210MB/s]"
      ]
diff --git a/monailabel/monailabel_wholebody_totalSegmentator_3dslicer.ipynb b/monailabel/monailabel_wholebody_totalSegmentator_3dslicer.ipynb
index a41c5b9b18..500ee83a40 100644
--- a/monailabel/monailabel_wholebody_totalSegmentator_3dslicer.ipynb
+++ b/monailabel/monailabel_wholebody_totalSegmentator_3dslicer.ipynb
@@ -94,7 +94,7 @@
     "\n",
     " - If you are going to use full dataset of TotalSegmentator, please refer to the dataset link, download the data, create and preprocess the images following [this page](https://zenodo.org/record/6802614).\n",
     " \n",
-    " - In this tutorial, we prepared a sample subset, resampled and ready to use. The subset is only for demonstration. Download [here](https://drive.google.com/file/d/1DtDmERVMjks1HooUhggOKAuDm0YIEunG/view?usp=sharing).\n",
+    " - In this tutorial, we prepared a sample subset, resampled and ready to use. The subset is only for demonstration. Download [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/totalSegmentator_mergedLabel_samples.zip).\n",
     "  \n",
     " To use the bundle, users need to download the data and merge all annotated labels into one NIFTI file. Each file contains 0-104 values, each value represents one anatomy class.\n",
     " \n",
diff --git a/multimodal/openi_multilabel_classification_transchex/README.md b/multimodal/openi_multilabel_classification_transchex/README.md
index d9ec346185..1ddc34a94d 100644
--- a/multimodal/openi_multilabel_classification_transchex/README.md
+++ b/multimodal/openi_multilabel_classification_transchex/README.md
@@ -16,6 +16,6 @@ completed, the dataset can be readily used for the tutorial.
 1) Create a new folder named 'monai_data' for downloading the raw data and preprocessing.
 2) Download the chest X-ray images in PNG format from this [link](https://openi.nlm.nih.gov/imgs/collections/NLMCXR_png.tgz). Copy the downloaded file (NLMCXR_png.tgz) to 'monai_data' directory and extract it to 'monai_data/dataset_orig/NLMCXR_png/'.
 3) Download the reports in XML format from this [link](https://openi.nlm.nih.gov/imgs/collections/NLMCXR_reports.tgz). Copy the downloaded file (NLMCXR_reports.tgz) to 'monai_data' directory and extract it to 'monai_data/dataset_orig/NLMCXR_reports/'.
-4) Download the splits of train, validation and test datasets from this [link](https://drive.google.com/u/1/uc?id=1jvT0jVl9mgtWy4cS7LYbF43bQE4mrXAY&export=download). Copy the downloaded file (TransChex_openi.zip)
+4) Download the splits of train, validation and test datasets from this [link](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/TransChex_openi.zip). Copy the downloaded file (TransChex_openi.zip)
 to 'monai_data' directory and extract it here.
 5) Run 'preprocess_openi.py' to process the images and reports.
diff --git a/pathology/multiple_instance_learning/README.md b/pathology/multiple_instance_learning/README.md
index 198ae2f2a2..0fc05976ac 100644
--- a/pathology/multiple_instance_learning/README.md
+++ b/pathology/multiple_instance_learning/README.md
@@ -49,7 +49,7 @@ python ./panda_mil_train_evaluate_pytorch_gpu.py -h
 
 Train in multi-gpu mode with AMP using all available gpus,
 assuming the training images are in the `/PandaChallenge2020/train_images` folder,
-it will use the pre-defined 80/20 data split in [datalist_panda_0.json](https://drive.google.com/drive/u/0/folders/1CAHXDZqiIn5QUfg5A7XsK1BncRu6Ftbh)
+it will use the pre-defined 80/20 data split in [datalist_panda_0.json](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/datalist_panda_0.json)
 
 ```bash
 python -u panda_mil_train_evaluate_pytorch_gpu.py \
diff --git a/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py b/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
index 58a859397a..fce1f57a95 100644
--- a/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
+++ b/pathology/multiple_instance_learning/panda_mil_train_evaluate_pytorch_gpu.py
@@ -530,7 +530,7 @@ def parse_args():
 
     if args.dataset_json is None:
         # download default json datalist
-        resource = "https://drive.google.com/uc?id=1L6PtKBlHHyUgTE4rVhRuOLTQKgD4tBRK"
+        resource = "https://developer.download.nvidia.com/assets/Clara/monai/tutorials/datalist_panda_0.json"
         dst = "./datalist_panda_0.json"
         if not os.path.exists(dst):
             gdown.download(resource, dst, quiet=False)
diff --git a/pathology/tumor_detection/README.MD b/pathology/tumor_detection/README.MD
index 20c1aa86b5..d53cee9d2c 100644
--- a/pathology/tumor_detection/README.MD
+++ b/pathology/tumor_detection/README.MD
@@ -18,11 +18,11 @@ The license for the pre-trained model used in examples is different than MONAI l
 
 All the data used to train and validate this model is from the [Camelyon-16 Challenge](https://camelyon16.grand-challenge.org/). You can download all the images for the "CAMELYON16" data set from various sources listed [here](https://camelyon17.grand-challenge.org/Data/).
 
-Location information for training/validation patches (the location on the whole slide image where patches are extracted) is adopted from [NCRF/coords](https://github.com/baidu-research/NCRF/tree/master/coords). The reformatted coordinations and labels in CSV format for training (`training.csv`) can be found [here](https://drive.google.com/file/d/1httIjgji6U6rMIb0P8pE0F-hXFAuvQEf/view?usp=sharing) and for validation (`validation.csv`) can be found [here](https://drive.google.com/file/d/1tJulzl9m5LUm16IeFbOCoFnaSWoB6i5L/view?usp=sharing).
+Location information for training/validation patches (the location on the whole slide image where patches are extracted) is adopted from [NCRF/coords](https://github.com/baidu-research/NCRF/tree/master/coords). The reformatted coordinates and labels in CSV format for training (`training.csv`) can be found [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_train.csv) and for validation (`validation.csv`) can be found [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_validation.csv).
 
 This pipeline expects the training/validation data (whole slide images) reside in `cfg["data_root"]/training/images`. By default `data_root` is pointing to the code folder `./`; however, you can easily modify it to point to a different directory by passing the following argument in the runtime: `--data-root /other/data/root/dir/`.
 
-> [`training_sub.csv`](https://drive.google.com/file/d/1rO8ZY-TrU9nrOsx-Udn1q5PmUYrLG3Mv/view?usp=sharing) and [`validation_sub.csv`](https://drive.google.com/file/d/130pqsrc2e9wiHIImL8w4fT_5NktEGel7/view?usp=sharing) is also provided to check the functionality of the pipeline using only two of the whole slide images: `tumor_001` (for training) and `tumor_101` (for validation). This dataset should not be used for the real training or any performance evaluation.
+> [`training_sub.csv`](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_train_sub.csv) and [`validation_sub.csv`](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_validation_sub.csv) are also provided to check the functionality of the pipeline using only two of the whole slide images: `tumor_001` (for training) and `tumor_101` (for validation). This dataset should not be used for real training or any performance evaluation.
 
 ### Input and output formats
 
diff --git a/pathology/tumor_detection/ignite/profiling_camelyon_pipeline.ipynb b/pathology/tumor_detection/ignite/profiling_camelyon_pipeline.ipynb
index b993dda5fd..33b8268385 100644
--- a/pathology/tumor_detection/ignite/profiling_camelyon_pipeline.ipynb
+++ b/pathology/tumor_detection/ignite/profiling_camelyon_pipeline.ipynb
@@ -92,7 +92,7 @@
    "source": [
     "### Download data\n",
     "\n",
-    "The pipeline that we are profiling `camelyon_train_evaluate_nvtx_profiling.py` required [Camelyon-16 Challenge](https://camelyon16.grand-challenge.org/) dataset.  You can download all the images for \"CAMELYON16\" data set from sources listed [here](https://camelyon17.grand-challenge.org/Data/). Also you can find the coordinations and labels for training (`training.csv`) [here](https://drive.google.com/file/d/1httIjgji6U6rMIb0P8pE0F-hXFAuvQEf/view?usp=sharing) and for validation (`validation.csv`) [here](https://drive.google.com/file/d/1tJulzl9m5LUm16IeFbOCoFnaSWoB6i5L/view?usp=sharing).\n",
+    "The pipeline that we are profiling `camelyon_train_evaluate_nvtx_profiling.py` required [Camelyon-16 Challenge](https://camelyon16.grand-challenge.org/) dataset.  You can download all the images for \"CAMELYON16\" data set from sources listed [here](https://camelyon17.grand-challenge.org/Data/). Also you can find the coordinations and labels for training (`training.csv`) [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_train.csv) and for validation (`validation.csv`) [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_validation.csv).\n",
     "\n",
     "However, for the demo of this notebook, we are downloading a very small subset of Camelyon dataset, which uses only one whole slide image `tumor_091.tif` .\n"
    ]
@@ -107,7 +107,7 @@
      "output_type": "stream",
      "text": [
       "Downloading...\n",
-      "From: https://drive.google.com/uc?id=1uWS4CXKD-NP_6-SgiQbQfhFMzbs0UJIr\n",
+      "From: https://developer.download.nvidia.com/assets/Clara/monai/tutorials/tumor_091.annotation\n",
       "To: /workspace/Code/tutorials/pathology/tumor_detection/ignite/training.csv\n",
       "100%|██████████| 153k/153k [00:00<00:00, 1.75MB/s]\n",
       "Downloading...\n",
@@ -130,7 +130,7 @@
    ],
    "source": [
     "# Download training.csv\n",
-    "dataset_url = \"https://drive.google.com/uc?id=1uWS4CXKD-NP_6-SgiQbQfhFMzbs0UJIr\"\n",
+    "dataset_url = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/tumor_091.annotation\"\n",
     "dataset_path = \"training.csv\"\n",
     "gdown.download(dataset_url, dataset_path, quiet=False)\n",
     "\n",
@@ -139,7 +139,7 @@
     "image_dir = os.path.join(\"training\", \"images\", \"\")\n",
     "if not os.path.exists(image_dir):\n",
     "    os.makedirs(image_dir)\n",
-    "image_url = \"https://drive.google.com/uc?id=1OxAeCMVqH9FGpIWpAXSEJe6cLinEGQtF\"\n",
+    "image_url = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/tumor_091.tif\"\n",
     "gdown.download(image_url, image_dir, quiet=False)"
    ]
   },
diff --git a/performance_profiling/pathology/profiling_train_base_nvtx.md b/performance_profiling/pathology/profiling_train_base_nvtx.md
index 7f1a6d2985..debb4ab3d4 100644
--- a/performance_profiling/pathology/profiling_train_base_nvtx.md
+++ b/performance_profiling/pathology/profiling_train_base_nvtx.md
@@ -20,9 +20,9 @@ For training and validation steps, they are easier to track by setting NVTX anno
 
 ## Data Preparation
 
-The pipeline that we are profiling `train_evaluate_nvtx.py` requires the [Camelyon-16 Challenge](https://camelyon16.grand-challenge.org/) dataset. You can download all the images for the "CAMELYON16" data set from the sources listed [here](https://camelyon17.grand-challenge.org/Data/)](https://camelyon17.grand-challenge.org/Data/). Location information for training/validation patches (the location on the whole slide image where patches are extracted) is adopted from [NCRF/coords](https://github.com/baidu-research/NCRF/tree/master/coords). The reformatted coordinations and labels in CSV format for training (`training.csv`) can be found [here](https://drive.google.com/file/d/1httIjgji6U6rMIb0P8pE0F-hXFAuvQEf/view?usp=sharing) and for validation (`validation.csv`) can be found [here](https://drive.google.com/file/d/1tJulzl9m5LUm16IeFbOCoFnaSWoB6i5L/view?usp=sharing).
+The pipeline that we are profiling, `train_evaluate_nvtx.py`, requires the [Camelyon-16 Challenge](https://camelyon16.grand-challenge.org/) dataset. You can download all the images for the "CAMELYON16" data set from the sources listed [here](https://camelyon17.grand-challenge.org/Data/). Location information for training/validation patches (the location on the whole slide image where patches are extracted) is adopted from [NCRF/coords](https://github.com/baidu-research/NCRF/tree/master/coords). The reformatted coordinates and labels in CSV format for training (`training.csv`) can be found [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_train.csv) and for validation (`validation.csv`) can be found [here](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_validation.csv).
 
-> [`training_sub.csv`](https://drive.google.com/file/d/1rO8ZY-TrU9nrOsx-Udn1q5PmUYrLG3Mv/view?usp=sharing) and [`validation_sub.csv`](https://drive.google.com/file/d/130pqsrc2e9wiHIImL8w4fT_5NktEGel7/view?usp=sharing) is also provided to check the functionality of the pipeline using only two of the whole slide images: `tumor_001` (for training) and `tumor_101` (for validation). This dataset should not be used for the real training.
+> [`training_sub.csv`](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_train_sub.csv) and [`validation_sub.csv`](https://developer.download.nvidia.com/assets/Clara/monai/tutorials/pathology_validation_sub.csv) are also provided to check the functionality of the pipeline using only two of the whole slide images: `tumor_001` (for training) and `tumor_101` (for validation). This dataset should not be used for real training.
 
 ## Run Nsight Profiling
 
diff --git a/vista_2d/vista_2d_tutorial_monai.ipynb b/vista_2d/vista_2d_tutorial_monai.ipynb
index 026f9bf568..fe2db6ac8a 100644
--- a/vista_2d/vista_2d_tutorial_monai.ipynb
+++ b/vista_2d/vista_2d_tutorial_monai.ipynb
@@ -225,7 +225,7 @@
    ],
    "source": [
     "data_list_path = os.path.join(root_dir, \"cellpose_toy_datalist.json\")\n",
-    "data_list_path_url = \"https://drive.google.com/file/d/1dohTDfWO2ruhSqyYMjS0UF5GGGUxrw5N/view?usp=drive_link\"\n",
+    "data_list_path_url = \"https://developer.download.nvidia.com/assets/Clara/monai/tutorials/cellpose_toy_datalist.json\"\n",
     "download_url(url=data_list_path_url, filepath=data_list_path)\n",
     "\n",
     "sam_weights_path_url = \"https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth\"\n",