From 59cbd181e3821d00e1d5d83a03373076c9d13b40 Mon Sep 17 00:00:00 2001
From: lext
Date: Sat, 16 Nov 2019 17:39:07 +0200
Subject: [PATCH] Preparing for webapp

---
 Dockerfile.cpu                            |  8 +++
 Dockerfile.gpu                            |  8 +++
 README.md                                 | 66 ++---------------------
 create_conda_env.sh                       | 17 ------
 ouludeepknee/own_codes/predict.py         |  6 +--
 ouludeepknee/own_codes/produce_gradcam.py | 53 +++++++++---------
 6 files changed, 48 insertions(+), 110 deletions(-)
 create mode 100644 Dockerfile.cpu
 create mode 100644 Dockerfile.gpu
 delete mode 100644 create_conda_env.sh

diff --git a/Dockerfile.cpu b/Dockerfile.cpu
new file mode 100644
index 0000000..19aef9a
--- /dev/null
+++ b/Dockerfile.cpu
@@ -0,0 +1,8 @@
+FROM miptmloulu/kneel:cpu
+
+MAINTAINER Aleksei Tiulpin, University of Oulu, Version 1.0
+
+RUN rm -rf /opt/pkg
+RUN mkdir -p /opt/pkg/
+COPY . /opt/pkg/
+RUN pip install -e /opt/pkg/
\ No newline at end of file
diff --git a/Dockerfile.gpu b/Dockerfile.gpu
new file mode 100644
index 0000000..fc8110f
--- /dev/null
+++ b/Dockerfile.gpu
@@ -0,0 +1,8 @@
+FROM miptmloulu/kneel:gpu
+
+MAINTAINER Aleksei Tiulpin, University of Oulu, Version 1.0
+
+RUN rm -rf /opt/pkg
+RUN mkdir -p /opt/pkg/
+COPY . /opt/pkg/
+RUN pip install -e /opt/pkg/
\ No newline at end of file
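The two Dockerfiles are identical except for the base image tag (`miptmloulu/kneel:cpu` vs `miptmloulu/kneel:gpu`): each wipes any previous copy of the package, copies the source tree into `/opt/pkg/`, and installs it with pip in editable mode. Presumably each image is built from the repository root, e.g. with `docker build -t deepknee:cpu -f Dockerfile.cpu .` (the tag name is illustrative, not defined by this patch). Note also that `MAINTAINER` is deprecated in current Docker releases in favour of `LABEL maintainer=...`.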
diff --git a/README.md b/README.md
index b397543..e3a87a9 100644
--- a/README.md
+++ b/README.md
@@ -3,75 +3,15 @@ Codes for paper **Automatic Knee Osteoarthritis Diagnosis from Plain Radiographs
 
 *Tiulpin, A., Thevenot, J., Rahtu, E., Lehenkari, P., & Saarakkala, S. (2018). Automatic Knee Osteoarthritis Diagnosis from Plain Radiographs: A Deep Learning-Based Approach. Scientific reports, 8(1), 1727.*
 
-## Background
-Osteoarthritis (OA) is the 11th highest disability factor and it is associated with the cartilage and bone degeneration in the joints. The most common type of OA is the knee OA and it is causing an extremely high economical burden to the society while being difficult to diagnose. In this study we present a novel Deep Learning-based clinically applicable approach to diagnose knee osteoarthritis from plain radiographs (X-ray images) outperforming existing approaches.
+# Disclaimer
 
-## Attention maps examples
-Our model learns localized radiological findings as we imposed prior anatomical knowledge to the network architecture.
-Here are some examples of attention maps and predictions (ground truth for the provided images is Kellgren-Lawrence grade 2):
-
-## This repository includes
-
-- [x] Codes for the main experiments (Supplementary information of the article);
-- [x] Pre-trained models;
-- [x] Datasets generation scripts;
-- [x] MOST and OAI cohorts bounding box annotations;
-- [x] Conda environments;
-- [x] Support of the inference on the external data.
-
-## Usage
-This repository includes the training code and the pre-trained models from each of our experiments. Please, see the paper for more details.
-### Setting up the environment
-For our experiments we used Ubuntu 14.04, CUDA 8.0 and CuDNN v6.
-
-For the convenience, we provide a script to set up a `conda` environment that should be used for training and inference of the models.
-Create, configure, and activate it as follow:
-
-```
-$ ./create_conda_env.sh
-$ conda activate deep_knee
-```
+**This branch is intended for inference only. Re-training is supported only in the `master` branch.**
 
 ### Inference on your data
-To run the inference on your DICOM data (assuming you followed the steps above), do the following:
-
-0. Clone the [KneeLocalizer](https://github.com/MIPT-Oulu/KneeLocalizer) repository, and produce
-the file with the bounding boxes, which determine the locations of the knees in the images
-(for the detailed instructions, see KneeLocalizer repository README file);
-1. Clone the `DeepKnee` repository locally:
-    ```
-    $ git clone git@github.com:MIPT-Oulu/DeepKnee.git
-    $ cd DeepKnee
-    ```
-2. Fetch the pre-trained models:
-    ```
-    $ git lfs install && git lfs pull
-    ```
-3. Create 16bit .png files of the left and right knees from the provided DICOMs:
-    ```
-    $ cd Dataset
-    $ python crop_rois_your_dataset.py --help
-    $ python crop_rois_your_dataset.py {parameters}
-    $ cd ..
-    ```
-    **NOTE:** the image of the left knee will be horizontally flipped to match the right one.
-4. Produce the file with KL gradings of the extracted knee images:
-    ```
-    $ cd ouludeepknee/own_codes
-    $ python inference_own/predict.py --help
-    $ python inference_own/predict.py {parameters}
-    $ cd ..
-    ```
-### Training on your data
-To run the training, execute the corresponding Bash files from the project directory
-(validation is visualized in `visdom`).
 
 ## License
-This code is freely available only for research purposes.
+This code is freely available only for research purposes. Commercial use is strictly prohibited.
 
 ## How to cite
 ```
diff --git a/create_conda_env.sh b/create_conda_env.sh
deleted file mode 100644
index f93cc8a..0000000
--- a/create_conda_env.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-conda create -y -n deep_knee python=3.6
-source activate deep_knee
-
-conda install -y -n deep_knee numpy opencv scipy pyyaml cython matplotlib scikit-learn
-conda install -y -n deep_knee pytorch==0.3.1 torchvision -c soumith
-conda install -y -n deep_knee git-lfs -c conda-forge
-
-pip install pip -U
-pip install pydicom
-pip install tqdm
-pip install pillow
-pip install torchvision
-pip install termcolor
-pip install visdom
-pip install jupyterlab
diff --git a/ouludeepknee/own_codes/predict.py b/ouludeepknee/own_codes/predict.py
index a736017..883a3f8 100644
--- a/ouludeepknee/own_codes/predict.py
+++ b/ouludeepknee/own_codes/predict.py
@@ -21,7 +21,7 @@ from ouludeepknee.own_codes.dataset import get_pair
 
 
-def load_model(filename, net):
+def load_model(filename, net: torch.nn.Module):
     state_dict = torch.load(filename, map_location=lambda storage, loc: storage)
     try:
         net.load_state_dict(state_dict)
@@ -88,8 +88,8 @@ def load_img(fname, img_proc, patch_proc):
 
     for fname in tqdm(imgs_fnames, total=len(imgs_fnames)):
         inp = load_img(fname, CenterCrop(300), patch_transform)
-        lateral = Variable(inp[0].unsqueeze(0), volatile=True)
-        medial = Variable(inp[1].unsqueeze(0), volatile=True)
+        lateral = inp[0].unsqueeze(0)
+        medial = inp[1].unsqueeze(0)
         res = 0
         for m in models:
             res += m(lateral, medial)
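A side note on the `predict.py` hunk above: dropping the 0.3-era `Variable(..., volatile=True)` wrappers is the right migration, but on post-0.4 PyTorch the bare forward passes will now record an autograd graph. A minimal sketch of how the ensemble loop could be wrapped in `torch.no_grad()` instead (`models`, `lateral` and `medial` are the names from the hunk; the helper itself is illustrative, not part of this patch):

```python
import torch

def ensemble_logits(models, lateral, medial):
    # Pure inference: disable graph construction to save memory and time,
    # which is what volatile=True used to do in PyTorch 0.3.
    with torch.no_grad():
        res = sum(m(lateral, medial) for m in models)
    return res / len(models)
```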
diff --git a/ouludeepknee/own_codes/produce_gradcam.py b/ouludeepknee/own_codes/produce_gradcam.py
index 485a2d1..9ac3bc3 100644
--- a/ouludeepknee/own_codes/produce_gradcam.py
+++ b/ouludeepknee/own_codes/produce_gradcam.py
@@ -12,7 +12,6 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.autograd import Variable
 import torchvision.transforms as transforms
 from tqdm import tqdm
 
@@ -26,6 +25,11 @@
 else:
     maybe_cuda = 'cpu'
 
+SNAPSHOTS_KNEE_GRADING = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '../snapshots_knee_grading'))
+
+SNAPSHOTS_EXPS = ['2017_10_10_12_30_42', '2017_10_10_12_30_46', '2017_10_10_12_30_49']
+
 
 def smooth_edge_mask(s, w):
     res = np.zeros((s + w * 2, s + w * 2))
@@ -74,7 +78,7 @@ def __init__(self, snapshots_paths, mean_std_path):
         self.mean_std_path = mean_std_path
 
         if torch.cuda.is_available():
-            self.cuda()
+            self.to('cuda')
 
     def init_networks_from_states(self):
         mean_vector, std_vector = np.load(self.mean_std_path)
@@ -88,7 +92,7 @@ def init_networks_from_states(self):
         nets = []
         for state in self.states:
             if torch.cuda.is_available():
-                net = nn.DataParallel(KneeNet(64, 0.2, True)).cuda()
+                net = nn.DataParallel(KneeNet(64, 0.2, True)).to('cuda')
             else:
                 net = nn.DataParallel(KneeNet(64, 0.2, True))
             net.load_state_dict(state)
@@ -159,34 +163,38 @@ def load_picture(self, fname, nbits=16, flip_left=False):
 
     @staticmethod
     def decompose_forward_avg(net, l, m):
-        l_o = net.branch(l)
-        m_o = net.branch(m)
+        with torch.no_grad():
+            l_o = net.branch(l)
+            m_o = net.branch(m)
+
+        l_o.requires_grad = True
+        m_o.requires_grad = True
         concat = torch.cat([l_o, m_o], 1)
         o = net.final(concat.view(l.size(0), net.final.in_features))
         return l_o, m_o, o
 
     @staticmethod
-    def extract_features_branch(net, l, m, wl, wm):
+    def extract_features_branch(model, l, m, wl, wm):
         def weigh_maps(weights, maps):
-            maps = Variable(maps.squeeze())
+            maps = maps.squeeze()
             weights = weights.squeeze()
             if torch.cuda.is_available():
-                res = torch.zeros(maps.size()[-2:]).cuda()
+                res = torch.zeros(maps.size()[-2:]).to('cuda')
             else:
-                res = Variable(torch.zeros(maps.size()[-2:]))
+                res = torch.zeros(maps.size()[-2:])
 
             for i, w in enumerate(weights):
                 res += w * maps[i]
             return res
 
         # We need to re-assemble the architecture
-        branch = nn.Sequential(net.branch.block1,
+        branch = nn.Sequential(model.branch.block1,
                                nn.MaxPool2d(2),
-                               net.branch.block2,
+                               model.branch.block2,
                                nn.MaxPool2d(2),
-                               net.branch.block3)
+                               model.branch.block3)
 
         o_l = branch(l).data
         o_m = branch(m).data
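The `decompose_forward_avg` change above is the post-`Variable` idiom for Grad-CAM (the originally committed `requres_grad` typo is corrected here, since a misspelled attribute would silently leave `requires_grad` as `False`): the siamese branch features are computed without a graph, then re-marked as leaf tensors, so that backpropagating from the classifier head stops at, and accumulates gradients on, exactly those feature maps. A self-contained sketch of the trick with a toy network (all names here are illustrative, not the repository's API):

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
features = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU())
head = nn.Linear(8 * 4 * 4, 3)
x = torch.randn(1, 1, 4, 4)

with torch.no_grad():                 # feature extraction: no graph recorded
    fmap = features(x)

fmap.requires_grad = True             # fmap is now a leaf: backward stops here
out = head(fmap.view(1, -1))          # graph is recorded from fmap onwards only

out[0, out.argmax()].backward()       # gradient of the top-class logit
weights = fmap.grad.mean(dim=(2, 3))  # per-channel weights, as Grad-CAM uses
print(weights.shape)                  # torch.Size([1, 8])
```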
@@ -256,9 +264,9 @@ def predict(self, x, nbits=16, flip_left=False):
         self.zero_grad()
 
         if torch.cuda.is_available():
-            out = self.forward(Variable(l.cuda()), Variable(m.cuda()))
+            out = self.forward(l.to('cuda'), m.to('cuda'))
         else:
-            out = self.forward(Variable(l), Variable(m))
+            out = self.forward(l, m)
 
         probs = self.sm(out).data.cpu().numpy()
 
@@ -266,21 +274,18 @@ def predict(self, x, nbits=16, flip_left=False):
         index = np.argmax(out.cpu().data.numpy(), axis=1).reshape(-1, 1)
 
         if torch.cuda.is_available():
-            out.backward(torch.from_numpy(ohe.fit_transform(index)).float().cuda())
+            out.backward(torch.from_numpy(ohe.fit_transform(index)).float().to('cuda'))
         else:
             out.backward(torch.from_numpy(ohe.fit_transform(index)).float())
 
         if torch.cuda.is_available():
-            heatmap = self.compute_gradcam(
-                Variable(l.cuda()), Variable(m.cuda()), 300, 128, 7)
+            heatmap = self.compute_gradcam(l.to('cuda'), m.to('cuda'), 300, 128, 7)
         else:
-            heatmap = self.compute_gradcam(
-                Variable(l), Variable(m), 300, 128, 7)
+            heatmap = self.compute_gradcam(l, m, 300, 128, 7)
 
         return img, heatmap, probs.squeeze()
 
-    def predict_save(self, fileobj_in, nbits=16, fname_suffix=None, path_dir_out='./',
-                     flip_left=False):
+    def predict_save(self, fileobj_in, nbits=16, fname_suffix=None, path_dir_out='./', flip_left=False):
         if fname_suffix is not None:
             pass
         elif isinstance(fileobj_in, str):
@@ -315,12 +320,6 @@ def predict_save(self, fileobj_in, nbits=16, fname_suffix=None, path_dir_out='./
 
         return probs.squeeze().argmax()
 
-SNAPSHOTS_KNEE_GRADING = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../snapshots_knee_grading'))
-
-SNAPSHOTS_EXPS = ['2017_10_10_12_30_42', '2017_10_10_12_30_46', '2017_10_10_12_30_49']
-
-
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument('--path_folds', default=SNAPSHOTS_KNEE_GRADING)
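For context on the `out.backward(...)` calls in `predict` above: seeding `backward` with a one-hot matrix selects the logit of the predicted class for each sample, so the gradients that feed the Grad-CAM heatmap are class-specific. A minimal sketch of that seeding under assumed shapes (one sample, three classes; `logits` stands in for `out`):

```python
import numpy as np
import torch

logits = torch.randn(1, 3, requires_grad=True)  # stands in for `out`

# Build a one-hot row for the argmax class, mirroring the OneHotEncoder call.
index = logits.detach().numpy().argmax(axis=1)
one_hot = np.zeros((1, 3), dtype=np.float32)
one_hot[np.arange(1), index] = 1.0

# Backward of sum(logits * one_hot): only the chosen class's logit contributes.
logits.backward(torch.from_numpy(one_hot))
print(logits.grad)  # equals the one-hot seed: gradient flows only via the top class
```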