[CAM]HCAM Tutorial

Import

import HCAM
from torchvision.models import *
from fastai.vision.all import *

Image Loader

dls = HCAM.Img_loader(fil_path='random_pet_one', resize=512)
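If the HCAM package is not at hand, a roughly equivalent dls can be built with plain fastai. This is only a sketch: it assumes random_pet_one is a folder of Oxford-IIIT-Pet-style images in which an uppercase first letter in the filename marks a cat, which is not stated above.

path = Path('random_pet_one')                     # same folder as fil_path above

def is_cat(f):
    return f.name[0].isupper()                    # assumed labelling rule (Pets convention)

dls_alt = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(512))     # Resize(512) mirrors resize=512 above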
CAM
lrnr = cnn_learner(dls, resnet34, metrics=error_rate)
lrnr.fine_tune(1)
epoch | train_loss | valid_loss | error_rate | time |
---|---|---|---|---|
0 | 0.124668 | 0.003786 | 0.000677 | 00:33 |
epoch | train_loss | valid_loss | error_rate | time |
---|---|---|---|---|
0 | 0.002606 | 0.000005 | 0.000000 | 00:43 |
net1 = lrnr.model[0]
net2 = lrnr.model[1]
net2 = torch.nn.Sequential(
    torch.nn.AdaptiveAvgPool2d(output_size=1),
    torch.nn.Flatten(),
    torch.nn.Linear(512, out_features=2, bias=False))
net = torch.nn.Sequential(net1, net2)
lrnr2 = Learner(dls, net, metrics=accuracy)
lrnr2.fine_tune(1)
epoch | train_loss | valid_loss | accuracy | time |
---|---|---|---|---|
0 | 0.014341 | 0.000901 | 1.000000 | 00:43 |
epoch | train_loss | valid_loss | accuracy | time |
---|---|---|---|---|
0 | 0.000014 | 0.000002 | 1.000000 | 00:43 |
x_cat, = first(dls.test_dl([PILImage.create(get_image_files(Path('random_pet_one'))[2])]))
x_cat = x_cat.to('cpu')
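Because the rebuilt head is just global average pooling, a flatten, and a bias-free Linear(512, 2), the classic CAM for x_cat can be formed by weighting the 512 feature maps of the body with the rows of that Linear layer. A minimal sketch of that vanilla CAM (not HCAM itself):

net.to('cpu').eval()
with torch.no_grad():
    fmap = net[0](x_cat)                              # (1, 512, 16, 16) feature maps for a 512x512 input
    w = net[1][2].weight                              # (2, 512) class weights of the Linear head
    camimg = torch.einsum('ck,bkij->bcij', w, fmap)   # (1, 2, 16, 16): one activation map per class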
HCAM
Mode 1
one = HCAM.HCAM(lrnr=lrnr2)
one.learner_thresh(Thresh=1600, input_img=x_cat)
/home/csy/Dropbox/blog/posts/CAM/HCAM/learners.py:66: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
power_threshed=np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
/home/csy/Dropbox/blog/posts/CAM/HCAM/learners.py:67: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
self.ybar_threshed = np.where(power_threshed>Thresh,torch.tensor(camimg[0].detach().reshape(-1)),0)
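These UserWarnings come from HCAM's internal thresholding step; judging by the source lines shown, the squared CAM coefficients are shrunk with EbayesThresh (called through rpy2's FloatVector) before the Thresh cutoff is applied, and the warnings themselves are harmless.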
one.learner_step(Rate=-0.05)
one.prob(input_img=x_cat)
one.mode_decomp(input_img=x_cat)
# one(input_img=x_cat)
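The commented-out call above returns the decomposition as a dictionary; the 'x' and 'x_res' entries used in the plot below presumably hold the extracted component and its residual (the key names are taken from the calls in this tutorial). A quick way to inspect it:

out = one(input_img=x_cat)
print(out.keys())                           # expected to contain at least 'x' and 'x_res'
print(out['x'].shape, out['x_res'].shape)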
HCAM.plot(dls, input_img=x_cat,
          input_img1=one(input_img=x_cat)['x'], input_img1_res=one(input_img=x_cat)['x_res'],
          one=0.3, one_res=0.2)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Mode 2
one.learner_thresh(Thresh=1600, input_img=one(input_img=x_cat)['x_res'])
one.learner_step(Rate=-0.05)
one.prob(input_img=one(input_img=x_cat)['x_res'])
one.mode_decomp(input_img=one(input_img=x_cat)['x_res'])
# one(input_img=one(input_img=x_cat)['x_res'])
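Mode 2 simply repeats the Mode 1 recipe on the Mode 1 residual. The sketch below generalises that pattern to n modes, reusing the calls and parameter values from above; it assumes one() keeps returning the same {'x', 'x_res'} dictionary at every step.

def decompose(one, img, n_modes=2, Thresh=1600, Rate=-0.05):
    modes = []
    for _ in range(n_modes):
        one.learner_thresh(Thresh=Thresh, input_img=img)
        one.learner_step(Rate=Rate)
        one.prob(input_img=img)
        one.mode_decomp(input_img=img)
        out = one(input_img=img)
        modes.append(out['x'])      # extracted mode
        img = out['x_res']          # the next mode is taken from the residual
    return modes, img               # list of modes plus the final residual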
HCAM.plot(dls, input_img=x_cat,
          input_img1=one(input_img=x_cat)['x'], input_img1_res=one(input_img=x_cat)['x_res'],
          input_img2=one(input_img=one(input_img=x_cat)['x'])['x'], input_img2_res=one(input_img=one(input_img=x_cat)['x_res'])['x_res'],
          one=0.35, one_res=0.2, two=0.5, two_res=0.2)
Other Methods
gradcam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='gradcam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(gradcam.squeeze(), alpha=0.7)
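The cam_method names used here match those of the pytorch-grad-cam package, so Other_method presumably wraps it (an assumption, not something stated above). For comparison, a Grad-CAM map can be produced with that package directly, targeting the last block of the ResNet body:

from pytorch_grad_cam import GradCAM

cam = GradCAM(model=net, target_layers=[net[0][-1]])   # net[0] is the resnet34 body
grayscale_cam = cam(input_tensor=x_cat)[0]             # heat-map for the top class, scaled to the input size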
hirescam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='hirescam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(hirescam.squeeze(), alpha=0.7)
gradcamplusplus = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='gradcamplusplus', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(gradcamplusplus.squeeze(), alpha=0.7)
ablationcam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='ablationcam', input_img=x_cat)
100%|██████████| 16/16 [00:27<00:00, 1.69s/it]
100%|██████████| 16/16 [00:27<00:00, 1.70s/it]
100%|██████████| 16/16 [00:26<00:00, 1.67s/it]
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(ablationcam.squeeze(), alpha=0.7)
xgradcam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='xgradcam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(xgradcam.squeeze(), alpha=0.7)
eigencam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='eigencam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(eigencam.squeeze(), alpha=0.7)
fullgrad = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='fullgrad', input_img=x_cat)
Warning: target_layers is ignored in FullGrad. All bias layers will be used instead
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(fullgrad.squeeze(), alpha=0.7)
eigengradcam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='eigengradcam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(eigengradcam.squeeze(), alpha=0.7)
layercam = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method='layercam', input_img=x_cat)
dls.train.decode((x_cat,))[0].squeeze().show()
plt.imshow(layercam.squeeze(), alpha=0.7)
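To compare the wrapped variants side by side, the same two display lines can be looped over one figure. A sketch that reuses only the HCAM.Other_method call and the display code shown above:

methods = ['gradcam', 'hirescam', 'gradcamplusplus', 'ablationcam', 'xgradcam',
           'eigencam', 'fullgrad', 'eigengradcam', 'layercam']
fig, axes = plt.subplots(3, 3, figsize=(12, 12))
for ax, m in zip(axes.flat, methods):
    heat = HCAM.Other_method(lrnr=lrnr2, status='cpu', cam_method=m, input_img=x_cat)
    dls.train.decode((x_cat,))[0].squeeze().show(ctx=ax)   # base image on this axis
    ax.imshow(heat.squeeze(), alpha=0.7)                   # CAM overlay
    ax.set_title(m)
    ax.axis('off')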