{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Imports" ] }, { "cell_type": "code", "execution_count": 770, "metadata": {}, "outputs": [], "source": [ "import os\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import random\n", "import time\n", "import splitfolders\n", "from glob import glob\n", "from PIL import Image\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "import torchvision.transforms as transforms\n", "from torch.optim.lr_scheduler import LambdaLR\n", "from torch.utils.data import Dataset, DataLoader\n", "from sklearn.preprocessing import MinMaxScaler\n", "from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio as psnr, normalized_root_mse as nrmse\n", "from torchsummary import summary\n", "import pyRAPL\n", "from pyRAPL.measurement import Measurement\n", "import pynvml" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Data Loader" ] }, { "cell_type": "code", "execution_count": 771, "metadata": {}, "outputs": [], "source": [ "class VolumeDataset(Dataset):\n", " def __init__(self, df200_dir, df200_list, df1_dir, df1_list, transform=None):\n", " self.df200_dir = df200_dir\n", " self.df200_list = df200_list\n", " self.df1_dir = df1_dir\n", " self.df1_list = df1_list\n", " self.transform = transform\n", "\n", " def __len__(self):\n", " return len(self.df200_list)\n", "\n", " def __getitem__(self, idx):\n", " df200_path = os.path.join(self.df200_dir, self.df200_list[idx])\n", " df1_path = os.path.join(self.df1_dir, self.df1_list[idx])\n", " \n", " # df200 = np.load(df200_path) \n", " df200 = Image.open(df200_path).convert(\"RGB\")\n", " df200 = np.array(df200).astype(np.float32)\n", " df200 = np.transpose(df200, (2, 0, 1))\n", " # df1 = np.load(df1_path) \n", " df1 = Image.open(df1_path).convert(\"RGB\")\n", " df1 = np.array(df1).astype(np.float32)\n", " df1 = np.transpose(df1, (2, 0, 1))\n", "\n", " df200 = torch.from_numpy(df200).float()\n", " df1 = torch.from_numpy(df1).float()\n", "\n", " if self.transform:\n", " df200 = self.transform(df200)\n", " df1 = self.transform(df1)\n", "\n", " return df200, df1, self.df200_list[idx].replace(\".bmp\", \"\")\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Model Definition" ] }, { "cell_type": "code", "execution_count": 772, "metadata": {}, "outputs": [], "source": [ "def conv2x(in_c, out_c, depthwise=False):\n", " if depthwise:\n", " conv = nn.Sequential(\n", " nn.Conv2d(in_c, in_c, kernel_size=3, padding=1, groups=in_c),\n", " nn.BatchNorm2d(in_c),\n", " nn.ReLU(inplace=True),\n", " nn.Conv2d(in_c, out_c, kernel_size=1),\n", " nn.BatchNorm2d(out_c),\n", " nn.ReLU(inplace=True),\n", " nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, groups=out_c),\n", " nn.BatchNorm2d(out_c),\n", " nn.ReLU(inplace=True),\n", " nn.Conv2d(out_c, out_c, kernel_size=1),\n", " nn.BatchNorm2d(out_c),\n", " nn.ReLU(inplace=True),\n", " )\n", " else:\n", " conv = nn.Sequential(\n", " nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),\n", " nn.BatchNorm2d(out_c),\n", " nn.ReLU(inplace=True),\n", " nn.Conv2d(out_c, out_c, kernel_size=3, padding=1),\n", " nn.BatchNorm2d(out_c),\n", " nn.ReLU(inplace=True)\n", " )\n", " return conv\n", "\n", "\n", "class UNet2D(nn.Module):\n", " def __init__(self, depthwise=False):\n", " super(UNet2D, self).__init__()\n", "\n", " self.depthwise = depthwise\n", " self.upsample = nn.Upsample(scale_factor=2, mode='nearest')\n", " self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n", "\n", " self.conv2x_0 = conv2x(3, 16, depthwise=self.depthwise)\n", " self.conv2x_1 = conv2x(16, 32, depthwise=self.depthwise)\n", " self.conv2x_2 = conv2x(32, 64, depthwise=self.depthwise)\n", " self.conv2x_3 = conv2x(64, 128, depthwise=self.depthwise)\n", "\n", " self.conv2x_4 = conv2x(64 + 128, 64, depthwise=self.depthwise)\n", " self.conv2x_5 = conv2x(32 + 64, 32, depthwise=self.depthwise)\n", " self.conv2x_6 = conv2x(16 + 32, 16, depthwise=self.depthwise)\n", " \n", " self.out = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1)\n", "\n", " def forward(self, volume):\n", " e0 = self.conv2x_0(volume)\n", " e1 = self.pool(e0)\n", " e2 = self.conv2x_1(e1)\n", " e3 = self.pool(e2)\n", " e4 = self.conv2x_2(e3)\n", " e5 = self.pool(e4)\n", " e6 = self.conv2x_3(e5)\n", "\n", " d0 = self.upsample(e6)\n", " d0 = torch.cat((d0, e4), dim=1)\n", " d0 = self.conv2x_4(d0)\n", "\n", " d1 = self.upsample(d0)\n", " d1 = torch.cat((d1, e2), dim=1)\n", " d1 = self.conv2x_5(d1)\n", "\n", " d2 = self.upsample(d1)\n", " d2 = torch.cat((d2, e0), dim=1)\n", " d2 = self.conv2x_6(d2)\n", "\n", " out = self.out(d2)\n", "\n", " out += volume[:, 1:2, :, :]\n", "\n", " return out\n", "\n", "class UNet2DQuantized(nn.Module):\n", " def __init__(self, depthwise=False):\n", " super(UNet2DQuantized, self).__init__()\n", "\n", " self.depthwise=depthwise\n", " self.quant = torch.ao.quantization.QuantStub()\n", " self.dequant = torch.ao.quantization.DeQuantStub()\n", " self.FF = torch.ao.nn.quantized.FloatFunctional()\n", "\n", " self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)\n", " self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n", "\n", " self.conv2x_0 = conv2x(3, 16, depthwise=self.depthwise)\n", " self.conv2x_1 = conv2x(16, 32, depthwise=self.depthwise)\n", " self.conv2x_2 = conv2x(32, 64, depthwise=self.depthwise)\n", " self.conv2x_3 = conv2x(64, 128, depthwise=self.depthwise)\n", "\n", " self.conv2x_4 = conv2x(64 + 128, 64, depthwise=self.depthwise)\n", " self.conv2x_5 = conv2x(32 + 64, 32, depthwise=self.depthwise)\n", " self.conv2x_6 = conv2x(16 + 32, 16, depthwise=self.depthwise)\n", " \n", " self.out = nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1)\n", "\n", " def forward(self, volume):\n", " volume = self.quant(volume)\n", "\n", " e0 = self.conv2x_0(volume)\n", " e1 = self.pool(e0)\n", " e2 = self.conv2x_1(e1)\n", " e3 = self.pool(e2)\n", " e4 = self.conv2x_2(e3)\n", " e5 = self.pool(e4)\n", " e6 = self.conv2x_3(e5)\n", "\n", " d0 = self.upsample(e6)\n", " d0 = self.FF.cat((d0, e4), dim=1)\n", " d0 = self.conv2x_4(d0)\n", "\n", " d1 = self.upsample(d0)\n", " d1 = self.FF.cat((d1, e2), dim=1)\n", " d1 = self.conv2x_5(d1)\n", "\n", " d2 = self.upsample(d1)\n", " d2 = self.FF.cat((d2, e0), dim=1)\n", " d2 = self.conv2x_6(d2)\n", "\n", " out = self.out(d2)\n", "\n", " out = self.FF.add(out, volume[:, 1:2, :, :])\n", "\n", " out = self.dequant(out)\n", "\n", " return out" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Training Functions" ] }, { "cell_type": "code", "execution_count": 773, "metadata": {}, "outputs": [], "source": [ "def set_seed(seed):\n", " random.seed(seed)\n", " np.random.seed(seed)\n", " torch.manual_seed(seed)\n", " torch.random.manual_seed(seed)\n", " torch.cuda.manual_seed(seed)\n", " torch.cuda.manual_seed_all(seed)\n", " torch.use_deterministic_algorithms(True)\n", " torch.backends.cudnn.deterministic = True\n", " torch.backends.cudnn.benchmark = False\n", "\n", "def train_model(model, train_loader, epochs=200, lr=1e-3, min_lr=2.5e-4, device='cuda'):\n", " set_seed(42)\n", " criterion = nn.L1Loss()\n", " optimizer = optim.RMSprop(model.parameters(), lr=lr)\n", " scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: max(min_lr / lr, 1 - (epoch / epochs)))\n", "\n", " def init_weights(m):\n", " if isinstance(m, nn.Conv2d):\n", " nn.init.trunc_normal_(m.weight, mean=0.0, std=0.02)\n", " if m.bias is not None:\n", " nn.init.constant_(m.bias, 0)\n", " elif isinstance(m, nn.BatchNorm2d):\n", " nn.init.constant_(m.weight, 1)\n", " nn.init.constant_(m.bias, 0)\n", "\n", " model.apply(init_weights)\n", "\n", " epoch_losses = []\n", "\n", " for epoch in range(epochs):\n", " model.train()\n", " running_loss = 0.0\n", " total_batches = 0\n", " print(f\"Epoch {epoch + 1}/{epochs}\")\n", "\n", " for i, (X_train, Y_train) in enumerate(train_loader):\n", " X_train = X_train.to(device)\n", " Y_train = Y_train.to(device)\n", "\n", " outputs = model(X_train)\n", "\n", " loss = criterion(outputs, Y_train)\n", " optimizer.zero_grad()\n", " loss.backward()\n", " optimizer.step()\n", "\n", " running_loss += loss.item()\n", " total_batches += 1\n", "\n", " scheduler.step()\n", "\n", " average_loss = running_loss / total_batches\n", " epoch_losses.append(average_loss)\n", " print(f\"Epoch [{epoch + 1}/{epochs}], Average Training Loss: {average_loss:.4f}, Learning Rate: {scheduler.get_last_lr()[0]:.6f}\")\n", "\n", " torch.save(model.state_dict(), f'/home/ekd/ekd_research/pet_reconstruction/python/weights_trad_conv_bmp/{epoch + 1}.pt')\n", "\n", " print(\"Training complete.\")\n", "\n", " plt.figure(figsize=(10, 6))\n", " plt.plot(range(1, epochs + 1), epoch_losses, label=\"Training Loss\")\n", " plt.xlabel(\"Epoch\")\n", " plt.ylabel(\"Loss\")\n", " plt.title(\"Training Loss Across Epochs\")\n", " plt.legend()\n", " plt.grid()\n", " plt.show()\n", " return model" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Evaluation Functions\n" ] }, { "cell_type": "code", "execution_count": 774, "metadata": {}, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "import os\n", "\n", "def save_predicted_slice(predicted_volume, output_folder, filename):\n", " os.makedirs(output_folder, exist_ok=True)\n", "\n", " predicted_volume = predicted_volume.squeeze(1)\n", " predicted_slice = 1 - predicted_volume[0]\n", "\n", " # plt.figure(figsize=(6, 6))\n", " plt.imshow(predicted_slice, cmap='gray')\n", " plt.axis('off') # Remove axis for cleaner visualization\n", "\n", " save_path = os.path.join(output_folder, f\"{filename}.png\")\n", " plt.savefig(save_path, bbox_inches='tight', dpi=300)\n", " plt.close()\n", "\n", "def plot_pet_slice(input_volume, predicted_volume, ground_truth_volume, output_folder, filename):\n", " input_volume = input_volume.squeeze(1)\n", " predicted_volume = predicted_volume.squeeze(1)\n", " ground_truth_volume = ground_truth_volume.squeeze(1)\n", " \n", " fig, axes = plt.subplots(1, 3, figsize=(12, 6))\n", " \n", " axes[0].imshow(1-input_volume[0], cmap='grey')\n", " axes[0].set_title(f'Ultra-Low Dose Input')\n", " axes[0].axis('off')\n", " \n", " axes[1].imshow(1-predicted_volume[0], cmap='grey')\n", " axes[1].set_title(f'Prediction')\n", " axes[1].axis('off')\n", " \n", " axes[2].imshow(1-ground_truth_volume[0], cmap='grey')\n", " axes[2].set_title(f'Ground Truth')\n", " axes[2].axis('off')\n", " \n", " plt.tight_layout()\n", " # plt.show()\n", " \n", " os.makedirs(output_folder, exist_ok=True)\n", " \n", " output_path = os.path.join(output_folder, f\"{filename}.png\")\n", " \n", " plt.savefig(output_path, bbox_inches='tight', dpi=300)\n", " plt.close(fig) # Close figure to free memory\n", "\n", "def evaluate_model(model, test_loader, suppress_output=False, device='cuda'):\n", " set_seed(42)\n", " model.eval()\n", " \n", " low_dose_nrmse_list = []\n", " low_dose_psnr_list = []\n", " low_dose_ssim_list = []\n", "\n", " nrmse_list = []\n", " psnr_list = []\n", " ssim_list = []\n", " elapsed_list = []\n", "\n", " with torch.no_grad():\n", " for X_test, Y_test, filenames in test_loader:\n", " X_test = X_test.to(device)\n", " Y_test = Y_test.to(device)\n", " start_time = time.time()\n", " predictions = model(X_test)\n", " elapsed_list.append(time.time() - start_time)\n", " X_test = X_test.cpu().numpy()\n", " predictions = predictions.cpu().numpy()\n", " Y_test = Y_test.cpu().numpy()\n", " \n", " X_test = X_test[:, 1:2, :, :]\n", " Y_test = Y_test[:, 1:2, :, :]\n", " \n", " if not suppress_output:\n", " for i in range(X_test.shape[0]): # Loop through batch dimension\n", " # plot_pet_slice(X_test[i:i+1], predictions[i:i+1], Y_test[i:i+1],\n", " # '/home/ekd/ekd_research/pet_reconstruction/python/visualizations/software', filenames[i])\n", " save_predicted_slice(X_test[i:i+1], '/home/ekd/ekd_research/pet_reconstruction/python/visualizations/software1', filenames[i])\n", "\n", " for pred, gt in zip(predictions, Y_test):\n", " gt = gt.squeeze()\n", " pred = pred.squeeze()\n", "\n", " nrmse_value = nrmse(gt, pred, normalization='euclidean')\n", " psnr_value = psnr(gt, pred, data_range=gt.max() - gt.min())\n", " ssim_value = ssim(gt, pred, data_range=gt.max() - gt.min())\n", "\n", " nrmse_list.append(nrmse_value)\n", " psnr_list.append(psnr_value)\n", " ssim_list.append(ssim_value)\n", " \n", " for pred, gt in zip(X_test, Y_test):\n", " gt = gt.squeeze()\n", " pred = pred.squeeze()\n", "\n", " low_dose_nrmse_value = nrmse(gt, pred, normalization='euclidean')\n", " low_dose_psnr_value = psnr(gt, pred, data_range=gt.max() - gt.min())\n", " low_dose_ssim_value = ssim(gt, pred, data_range=gt.max() - gt.min())\n", "\n", " low_dose_nrmse_list.append(low_dose_nrmse_value)\n", " low_dose_psnr_list.append(low_dose_psnr_value)\n", " low_dose_ssim_list.append(low_dose_ssim_value)\n", " \n", " avg_low_dose_nrmse = np.mean(low_dose_nrmse_list)\n", " avg_low_dose_psnr = np.mean(low_dose_psnr_list)\n", " avg_low_dose_ssim = np.mean(low_dose_ssim_list)\n", "\n", " avg_nrmse = np.mean(nrmse_list)\n", " avg_psnr = np.mean(psnr_list)\n", " avg_ssim = np.mean(ssim_list)\n", " avg_elapsed = np.mean(elapsed_list)\n", "\n", " # if not suppress_output:\n", " print(f\"Average Low-Dose NRMSE: {avg_low_dose_nrmse:.4f}\")\n", " print(f\"Average Low-Dose PSNR: {avg_low_dose_psnr:.4f}\")\n", " print(f\"Average Low-Dose SSIM: {avg_low_dose_ssim:.4f}\")\n", "\n", " print(f\"\\nModel Evaluation Results:\")\n", " print(f\"Average NRMSE: {avg_nrmse:.4f}\")\n", " print(f\"Average PSNR: {avg_psnr:.4f}\")\n", " print(f\"Average SSIM: {avg_ssim:.4f}\")\n", " print(f\"Elapsed Time: {avg_elapsed:.6f}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Data Preprocessing \n" ] }, { "cell_type": "code", "execution_count": 775, "metadata": {}, "outputs": [], "source": [ "master_start_time = time.time()\n", "DATASET_PATH = \"/home/ekd/ekd_research/pet_reconstruction/python/data/\"\n", "\n", "# DF1_volume_list = sorted(glob(DATASET_PATH + 'volumes/DF1/*.npy'))\n", "# DF200_volume_list = sorted(glob(DATASET_PATH + 'volumes/DF200/*.npy'))\n", "\n", "# scaler = MinMaxScaler()\n", "# window_size = 3\n", "\n", "# for idx in range(len(DF1_volume_list)):\n", "# volume = np.load(DF1_volume_list[idx])\n", "# volume_flatten = volume.flatten().reshape(-1, 1)\n", "# scaled_flatten_volume = scaler.fit_transform(volume_flatten)\n", "# normalized_volume = scaled_flatten_volume.reshape(volume.shape)\n", "\n", "# height, width, num_slices = normalized_volume.shape\n", "# num_windows = num_slices - window_size + 1\n", "# # windows = np.zeros((num_windows, window_size, height, width))\n", "# windows = np.zeros((num_windows, height, width, window_size))\n", " \n", "# for i in range(num_windows):\n", "# # windows[i] = normalized_volume[:, :, i:i + window_size].transpose(2, 0, 1)\n", "# windows[i] = normalized_volume[:, :, i:i + window_size]\n", "\n", "# windows_cast = (windows[i] * 255).astype(np.uint8)\n", "# pil_image = Image.fromarray(windows_cast)\n", "# pil_image.save(DATASET_PATH + f'volumes_processed/DF1/volume_{idx}_input_{i}.bmp')\n", "\n", "# # np.save(DATASET_PATH + f'volumes_processed/DF1/volume_{idx}_input_{i}.npy', windows[i])\n", "\n", "# for idx in range(len(DF200_volume_list)):\n", "# volume = np.load(DF200_volume_list[idx])\n", "# volume_flatten = volume.flatten().reshape(-1, 1)\n", "# scaled_flatten_volume = scaler.fit_transform(volume_flatten)\n", "# normalized_volume = scaled_flatten_volume.reshape(volume.shape)\n", "\n", "# height, width, num_slices = normalized_volume.shape\n", "# num_windows = num_slices - window_size + 1\n", "# # windows = np.zeros((num_windows, window_size, height, width))\n", "# windows = np.zeros((num_windows, height, width, window_size))\n", " \n", "# for i in range(num_windows):\n", "# # windows[i] = normalized_volume[:, :, i:i + window_size].transpose(2, 0, 1)\n", "# windows[i] = normalized_volume[:, :, i:i + window_size]\n", "\n", "# windows_cast = (windows[i] * 255).astype(np.uint8)\n", "# pil_image = Image.fromarray(windows_cast)\n", "# pil_image.save(DATASET_PATH + f'volumes_processed/DF200/volume_{idx}_input_{i}.bmp')\n", "\n", "train_DF200_dir = DATASET_PATH + 'volumes_split/train/DF200/'\n", "train_DF1_dir = DATASET_PATH + 'volumes_split/train/DF1/'\n", "\n", "train_DF200_list = sorted([f for f in os.listdir(train_DF200_dir) if f.endswith('.bmp')])\n", "train_DF1_list = sorted([f for f in os.listdir(train_DF1_dir) if f.endswith('.bmp')])\n", "\n", "# splitfolders.ratio(input = DATASET_PATH + 'volumes_processed/', output = DATASET_PATH + 'volumes_split', seed=42, ratio = (.8, .2), group_prefix=None)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Model Training \n" ] }, { "cell_type": "code", "execution_count": 776, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "----------------------------------------------------------------\n", " Layer (type) Output Shape Param #\n", "================================================================\n", " Conv2d-1 [32, 16, 128, 128] 448\n", " BatchNorm2d-2 [32, 16, 128, 128] 32\n", " ReLU-3 [32, 16, 128, 128] 0\n", " Conv2d-4 [32, 16, 128, 128] 2,320\n", " BatchNorm2d-5 [32, 16, 128, 128] 32\n", " ReLU-6 [32, 16, 128, 128] 0\n", " MaxPool2d-7 [32, 16, 64, 64] 0\n", " Conv2d-8 [32, 32, 64, 64] 4,640\n", " BatchNorm2d-9 [32, 32, 64, 64] 64\n", " ReLU-10 [32, 32, 64, 64] 0\n", " Conv2d-11 [32, 32, 64, 64] 9,248\n", " BatchNorm2d-12 [32, 32, 64, 64] 64\n", " ReLU-13 [32, 32, 64, 64] 0\n", " MaxPool2d-14 [32, 32, 32, 32] 0\n", " Conv2d-15 [32, 64, 32, 32] 18,496\n", " BatchNorm2d-16 [32, 64, 32, 32] 128\n", " ReLU-17 [32, 64, 32, 32] 0\n", " Conv2d-18 [32, 64, 32, 32] 36,928\n", " BatchNorm2d-19 [32, 64, 32, 32] 128\n", " ReLU-20 [32, 64, 32, 32] 0\n", " MaxPool2d-21 [32, 64, 16, 16] 0\n", " Conv2d-22 [32, 128, 16, 16] 73,856\n", " BatchNorm2d-23 [32, 128, 16, 16] 256\n", " ReLU-24 [32, 128, 16, 16] 0\n", " Conv2d-25 [32, 128, 16, 16] 147,584\n", " BatchNorm2d-26 [32, 128, 16, 16] 256\n", " ReLU-27 [32, 128, 16, 16] 0\n", " Upsample-28 [32, 128, 32, 32] 0\n", " Conv2d-29 [32, 64, 32, 32] 110,656\n", " BatchNorm2d-30 [32, 64, 32, 32] 128\n", " ReLU-31 [32, 64, 32, 32] 0\n", " Conv2d-32 [32, 64, 32, 32] 36,928\n", " BatchNorm2d-33 [32, 64, 32, 32] 128\n", " ReLU-34 [32, 64, 32, 32] 0\n", " Upsample-35 [32, 64, 64, 64] 0\n", " Conv2d-36 [32, 32, 64, 64] 27,680\n", " BatchNorm2d-37 [32, 32, 64, 64] 64\n", " ReLU-38 [32, 32, 64, 64] 0\n", " Conv2d-39 [32, 32, 64, 64] 9,248\n", " BatchNorm2d-40 [32, 32, 64, 64] 64\n", " ReLU-41 [32, 32, 64, 64] 0\n", " Upsample-42 [32, 32, 128, 128] 0\n", " Conv2d-43 [32, 16, 128, 128] 6,928\n", " BatchNorm2d-44 [32, 16, 128, 128] 32\n", " ReLU-45 [32, 16, 128, 128] 0\n", " Conv2d-46 [32, 16, 128, 128] 2,320\n", " BatchNorm2d-47 [32, 16, 128, 128] 32\n", " ReLU-48 [32, 16, 128, 128] 0\n", " Conv2d-49 [32, 1, 128, 128] 17\n", "================================================================\n", "Total params: 488,705\n", "Trainable params: 488,705\n", "Non-trainable params: 0\n", "----------------------------------------------------------------\n", "Input size (MB): 6.00\n", "Forward/backward pass size (MB): 1648.00\n", "Params size (MB): 1.86\n", "Estimated Total Size (MB): 1655.86\n", "----------------------------------------------------------------\n" ] } ], "source": [ "use_depthwise = False\n", "device = 'cpu'\n", "\n", "model = UNet2D(depthwise=use_depthwise).to(device)\n", "\n", "train_dataset = VolumeDataset(train_DF200_dir, train_DF200_list, train_DF1_dir, train_DF1_list)\n", "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)\n", "\n", "summary(model, input_size=(3, 128, 128), batch_size=32, device=device)\n", "\n", "# trained_model = train_model(model, train_loader, epochs=200, lr=1e-3, min_lr=2.5e-4, device=device)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Model Evaluation \n" ] }, { "cell_type": "code", "execution_count": 777, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Average Low-Dose NRMSE: 0.7330\n", "Average Low-Dose PSNR: 17.7156\n", "Average Low-Dose SSIM: 0.6931\n", "\n", "Model Evaluation Results:\n", "Average NRMSE: 0.1689\n", "Average PSNR: 31.5982\n", "Average SSIM: 0.9260\n", "Elapsed Time: 0.245073\n", "CPU Power Usage: 70.41720052281173 Watts\n", "2.1359570026397705\n" ] } ], "source": [ "\n", "test_DF200_dir = DATASET_PATH + 'volumes_split/val/DF200/'\n", "test_DF1_dir = DATASET_PATH + 'volumes_split/val/DF1/'\n", "\n", "test_DF200_list = sorted([f for f in os.listdir(test_DF200_dir) if f.endswith('.bmp')])\n", "test_DF1_list = sorted([f for f in os.listdir(test_DF1_dir) if f.endswith('.bmp')])\n", "\n", "test_dataset = VolumeDataset(test_DF200_dir, test_DF200_list, test_DF1_dir, test_DF1_list)\n", "test_loader = DataLoader(test_dataset, batch_size=32, shuffle=True, num_workers=4)\n", "\n", "if use_depthwise:\n", " model.load_state_dict(torch.load('/home/ekd/ekd_research/pet_reconstruction/python/weights/200.pt', weights_only=True))\n", "else:\n", " model.load_state_dict(torch.load('/home/ekd/ekd_research/pet_reconstruction/python/weights_trad_conv_bmp/200.pt', weights_only=True))\n", "\n", "torch.save(model, \"/home/ekd/ekd_research/pet_reconstruction/python/models/model_bmp.pt\")\n", "\n", "# dummy_input = torch.rand(32, 3, 128, 128)\n", "# torch.onnx.export(model, dummy_input, '/home/ekd/ekd_research/pet_reconstruction/python/models/model_bmp.onnx', opset_version=11)\n", "\n", "pyRAPL.setup()\n", "\n", "measure = pyRAPL.Measurement('fp32')\n", "measure.begin()\n", "\n", "evaluate_model(model, test_loader, suppress_output=True, device=device)\n", "\n", "measure.end()\n", "\n", "cpu_power_usage = measure.result.pkg[0] / measure.result.duration\n", "print(f\"CPU Power Usage: {cpu_power_usage} Watts\")\n", "\n", "master_end_time = time.time() - master_start_time\n", "print(master_end_time)" ] } ], "metadata": { "kernelspec": { "display_name": ".venv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 2 }