import torch
from fastai.vision.all import *
import cv2
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageFile
from PIL import Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
from torchvision.utils import save_image
[CAM] HCAM random
CAM
https://seoyeonc.github.io/chch/cnn/feature%20extraction/big%20data%20analysis/2022/01/11/bd_9주차.html
https://seoyeonc.github.io/chch/cam/2022/01/10/bd-8주차_1.html
When a CNN classifies an image, the larger a final-layer logit, the closer its softmax value is to 1 — so a strong response in the last conv layer feeding the channel of the input image's label means the network reacted strongly to that class!
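In code, a CAM is just the linear head's class weights contracted against the last conv layer's feature maps. A minimal sketch of the computation used throughout this post (the tensors are random stand-ins; the shapes match the resnet34 body below, which maps a 512×512 input to 512 feature maps of 16×16):

import torch
feats = torch.randn(512, 16, 16)               # last conv output: (channels, H, W)
w = torch.randn(2, 512)                        # linear head weights: (classes, channels)
cam = torch.einsum('ij,jkl -> ikl', w, feats)  # (2, 16, 16): one activation map per class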
import rpy2
import rpy2.robjects as ro
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.packages import importr
def label_func(f):
    if f[0].isupper():
        return 'cat'
    else:
        return 'dog'
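This works because of the Oxford-IIIT Pet naming convention assumed here: cat-breed filenames start with an uppercase letter, dog-breed filenames with a lowercase one. For example (hypothetical filenames, for illustration):

label_func('Abyssinian_1.jpg')  # -> 'cat'
label_func('beagle_3.jpg')      # -> 'dog'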
Training
path_r = Path('random_pet_one')  # photos with random boxes inserted
files = get_image_files(path_r)
dls_r = ImageDataLoaders.from_name_func(path_r, files, label_func, item_tfms=Resize(512))
lrnr_r1 = cnn_learner(dls_r, resnet34, metrics=error_rate)
lrnr_r1.fine_tune(1)
/home/csy/anaconda3/envs/temp_csy/lib/python3.8/site-packages/fastai/vision/learner.py:288: UserWarning: `cnn_learner` has been renamed to `vision_learner` -- please update your code
warn("`cnn_learner` has been renamed to `vision_learner` -- please update your code")
/home/csy/anaconda3/envs/temp_csy/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
warnings.warn(
/home/csy/anaconda3/envs/temp_csy/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet34_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet34_Weights.DEFAULT` to get the most up-to-date weights.
warnings.warn(msg)
(fastai progress output: epoch 0/1 table with train_loss / valid_loss / error_rate; batch progress 86.96% [80/92 23:27<03:31], running loss 0.1528)
net_1 = lrnr_r1.model[0]
net_2 = lrnr_r1.model[1]
net_2 = torch.nn.Sequential(
    torch.nn.AdaptiveAvgPool2d(output_size=1),
    torch.nn.Flatten(),
    torch.nn.Linear(512, out_features=2, bias=False))
net_r = torch.nn.Sequential(net_1, net_2)
lrnr_r2 = Learner(dls_r, net_r, metrics=accuracy)
lrnr_r2.fine_tune(10)
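Rebuilding the head as pool → flatten → bias-free linear is what makes the einsum CAM exact: each class logit is then a plain weighted sum of per-channel averages. A quick shape check under this setup (the printed sizes follow from resnet34's 32× downsampling of a 512×512 input):

xb = torch.randn(1, 3, 512, 512)   # dummy batch at the Resize(512) input size
feats = net_1(xb)
print(feats.shape)                 # expected: torch.Size([1, 512, 16, 16])
print(net_2(feats).shape)          # expected: torch.Size([1, 2]) — the cat/dog logits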
interp = ClassificationInterpretation.from_learner(lrnr_r2)
interp.plot_confusion_matrix()
Accuracy
cat_acc_s = []  # cats correctly classified as cats
cat_acc_f = []  # dogs misclassified as cats
dog_acc_s = []  # dogs correctly classified as dogs
dog_acc_f = []  # cats misclassified as dogs
for i in range(len(path_r.ls())):
    x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[i])]))
    camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
    a, b = net_r(x).tolist()[0]
    catprob, dogprob = np.exp(a)/(np.exp(a)+np.exp(b)), np.exp(b)/(np.exp(a)+np.exp(b))
    if catprob > dogprob:
        if label_func(str(list(path_r.ls())[i]).split('/')[-1]) == 'cat':
            cat_acc_s.append(catprob.round(5))
        else:
            cat_acc_f.append(catprob.round(5))
    else:
        if label_func(str(list(path_r.ls())[i]).split('/')[-1]) == 'dog':
            dog_acc_s.append(dogprob.round(5))
        else:
            dog_acc_f.append(dogprob.round(5))
print(len(cat_acc_s))
print(len(cat_acc_f))
print(len(dog_acc_s))
print(len(dog_acc_f))
954
888
1125
50
print(sum(cat_acc_s)/len(cat_acc_s) * 100)
print(sum(cat_acc_f)/len(cat_acc_f) * 100)
print(sum(dog_acc_s)/len(dog_acc_s) * 100)
print(sum(dog_acc_f)/len(dog_acc_f) * 100)
81.43121802935015
64.14542680180179
74.7282746666667
55.289880000000004
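For reference, the four buckets above also give the aggregate accuracy; a quick check:

n_correct = len(cat_acc_s) + len(dog_acc_s)            # 954 + 1125 = 2079
n_total = n_correct + len(cat_acc_f) + len(dog_acc_f)  # 2079 + 888 + 50 = 3017
print(n_correct / n_total * 100)                       # ≈ 68.9% overall accuracy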
Visualization
# # Seoyeon's revised code
# fig, (ax1,ax2) = plt.subplots(1,2)
# #
# dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
# ax1.imshow(camimg[0].to("cpu").detach(),alpha=0.7,extent=(0,511,511,0),interpolation='spline36',cmap='magma')
# #
# dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
# ax2.imshow(camimg[1].to("cpu").detach(),alpha=0.7,extent=(0,511,511,0),interpolation='spline36',cmap='magma')
# fig.set_figwidth(8)
# fig.set_figheight(8)
# fig.tight_layout()
# fig, ax = plt.subplots(5,5)
# k=0
# for i in range(5):
# for j in range(5):
# x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[k])]))
# camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
# a,b = net_r(x).tolist()[0]
# catprob, dogprob = np.exp(a)/ (np.exp(a)+np.exp(b)) , np.exp(b)/ (np.exp(a)+np.exp(b))
# if catprob>dogprob:
# dls_r.train.decode((x,))[0].squeeze().show(ax=ax[i][j])
# ax[i][j].imshow(camimg[0].to("cpu").detach(),alpha=0.7,extent=(0,512,512,0),interpolation='bilinear',cmap='bone')
# ax[i][j].set_title("cat(%s)" % catprob.round(5))
# else:
# dls_r.train.decode((x,))[0].squeeze().show(ax=ax[i][j])
# ax[i][j].imshow(camimg[1].to("cpu").detach(),alpha=0.7,extent=(0,512,512,0),interpolation='bilinear',cmap='bone')
# ax[i][j].set_title("dog(%s)" % dogprob.round(5))
# k=k+1
# fig.set_figwidth(16)
# fig.set_figheight(16)
# fig.tight_layout()
thresholding point
fig, ax = plt.subplots(5,5)
k = 0
for i in range(5):
    for j in range(5):
        x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[k])]))
        camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
        a, b = net_r(x).tolist()[0]
        catprob, dogprob = np.exp(a)/(np.exp(a)+np.exp(b)), np.exp(b)/(np.exp(a)+np.exp(b))
        ebayesthresh = importr('EbayesThresh').ebayesthresh
        power_threshed = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
        ybar_threshed = np.where(power_threshed>2000, torch.tensor(camimg[0].detach().reshape(-1)), 0)
        power_threshed2 = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[1].detach().reshape(-1))**2)))
        ybar_threshed2 = np.where(power_threshed2>2000, torch.tensor(camimg[1].detach().reshape(-1)), 0)
        ybar_threshed = torch.tensor(ybar_threshed.reshape(16,16))
        ybar_threshed2 = torch.tensor(ybar_threshed2.reshape(16,16))
        if catprob > dogprob:
            # test = camimg[0] - torch.min(camimg[0])
            A1 = torch.exp(-0.1*ybar_threshed)
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
            x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("cat(%s)" % catprob.round(5))
        else:
            # test = camimg[1] - torch.min(camimg[1])
            A1 = torch.exp(-0.1*ybar_threshed2)
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
            x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("dog(%s)" % dogprob.round(5))
        k = k+1
fig.set_figwidth(16)
fig.set_figheight(16)
fig.tight_layout()
UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
power_threshed=np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
ebayes X (without EbayesThresh)
fig, ax = plt.subplots(5,5)
k = 0
for i in range(5):
    for j in range(5):
        x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[k])]))
        camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
        a, b = net_r(x).tolist()[0]
        catprob, dogprob = np.exp(a)/(np.exp(a)+np.exp(b)), np.exp(b)/(np.exp(a)+np.exp(b))
        if catprob > dogprob:
            test = camimg[0] - torch.min(camimg[0])
            A1 = torch.exp(-0.1*test)
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
            x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("cat(%s)" % catprob.round(5))
        else:
            test = camimg[1] - torch.min(camimg[1])
            A1 = torch.exp(-0.1*test)
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
            x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("dog(%s)" % dogprob.round(5))
        k = k+1
fig.set_figwidth(16)
fig.set_figheight(16)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Step by step
CAT
x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[2])]))
camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
ebayesthresh = importr('EbayesThresh').ebayesthresh

power_threshed = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
ybar_threshed = np.where(power_threshed>1600, torch.tensor(camimg[0].detach().reshape(-1)), 0)
ybar_threshed = torch.tensor(ybar_threshed.reshape(16,16))

power_threshed2 = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[1].detach().reshape(-1))**2)))
ybar_threshed2 = np.where(power_threshed2>1600, torch.tensor(camimg[1].detach().reshape(-1)), 0)
ybar_threshed2 = torch.tensor(ybar_threshed2.reshape(16,16))
UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
power_threshed=np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
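Here ebayesthresh is R's EbayesThresh::ebayesthresh called through rpy2: it applies Empirical Bayes thresholding to the squared CAM values, shrinking noise-level entries toward zero, and the np.where then keeps only the CAM entries whose thresholded power survives the hand-picked cutoff (1600 for this image). A toy sketch of the same pattern, assuming the EbayesThresh R package is installed:

import numpy as np
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.packages import importr

ebayesthresh = importr('EbayesThresh').ebayesthresh
y = np.r_[np.random.normal(0, 1, 100), np.random.normal(30, 1, 5)]  # noise + a few strong signals
power = np.array(ebayesthresh(FloatVector(y**2)))  # small y**2 entries are shrunk toward 0
y_kept = np.where(power > 0, y, 0)                 # keep only the surviving coordinates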
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("Input image")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow((ybar_threshed).to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("CAT PART")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow((ybar_threshed2).to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax3.set_title("DOG PART")
#
fig.set_figwidth(12)
fig.set_figheight(12)
fig.tight_layout()
- The stronger the evidence for the decision, the more the color shifts from blue toward purple.
a, b = net_r(x).tolist()[0]
np.exp(a) / (np.exp(a)+np.exp(b)), np.exp(b) / (np.exp(a)+np.exp(b))
(0.9999988566557949, 1.1433442051371491e-06)
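These two expressions are just the softmax over the two logits; the same probabilities can be read off directly with, e.g.:

catprob, dogprob = torch.softmax(net_r(x), dim=1)[0].tolist()  # ≈ (1.0, 1.1e-06) for this image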
mode 1
# test = camimg_o[0] - torch.min(camimg_o[0])
A1 = torch.exp(-0.05*(ybar_threshed))
A2 = 1 - A1
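A1 and A2 act as complementary soft masks: torch.exp(-0.05*y) sends strong-evidence CAM values toward 0 and leaves untouched regions near 1, so A1 weights the residual and A2 = 1 - A1 weights the detected part. For instance:

y = torch.tensor([0., 10., 100.])  # no / weak / strong evidence after thresholding
A1 = torch.exp(-0.05*y)            # tensor([1.0000, 0.6065, 0.0067]) — residual weight
A2 = 1 - A1                        # tensor([0.0000, 0.3935, 0.9933]) — detected-part weight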
fig, (ax1, ax2) = plt.subplots(1,2)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(A2.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("MODE1 WEIGHT")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(A1.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("MODE1 RES WEIGHT")
#
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
# mode 1 res
X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1

# mode 1
X12 = np.array(A2.to("cpu").detach(), dtype=np.float32)
Y12 = torch.Tensor(cv2.resize(X12, (512,512), interpolation=cv2.INTER_LINEAR))
x12 = x.squeeze().to('cpu')*Y12 - torch.min(x.squeeze().to('cpu'))*Y12
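Because A1 + A2 = 1 at every pixel and bilinear resizing is linear, Y1 + Y12 ≈ 1 as well, so the mode-1 image and its residual exactly partition the (min-shifted) input. A quick sanity check, up to interpolation rounding:

x_cpu = x.squeeze().to('cpu')
print(torch.allclose(Y1 + Y12, torch.ones(512, 512), atol=1e-4))      # masks sum to 1
print(torch.allclose(x1 + x12, x_cpu - torch.min(x_cpu), atol=1e-3))  # decomposition is exact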
- Separating the 1st CAM
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.35).squeeze().show(ax=ax1)  # MODE1
(x1*0.2).squeeze().show(ax=ax2)    # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
x1 = x1.reshape(1,3,512,512)
net_1.to('cpu')
net_2.to('cpu')
Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Flatten(start_dim=1, end_dim=-1)
(2): Linear(in_features=512, out_features=2, bias=False)
)
camimg1 = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x1).squeeze())
a1, b1 = net_r(x1).tolist()[0]
np.exp(a1) / (np.exp(a1)+np.exp(b1)), np.exp(b1) / (np.exp(a1)+np.exp(b1))
(0.9505286776057943, 0.04947132239420577)
- The mode-1 residual is still classified as a cat, but with lower confidence (0.95 vs ≈1.0 on the original).
- mode1 res
fig, (ax1, ax2) = plt.subplots(1,2)
#
(x1*0.25).squeeze().show(ax=ax1)
ax1.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
(x1*0.25).squeeze().show(ax=ax2)
ax2.imshow(camimg1[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
- Comparison with the first CAM result
fig, (ax1, ax2) = plt.subplots(1,2)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("1ST CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("2ND CAM")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
- Separating the 2nd CAM
test1 = camimg1[0] - torch.min(camimg1[0])
A3 = torch.exp(-0.03*(test1))
A4 = 1 - A3
fig, (ax1, ax2) = plt.subplots(1,2)
#
x1.squeeze().show(ax=ax2)
dls_r.train.decode((x1,))[0].squeeze().show(ax=ax1)
ax1.imshow(A3.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("MODE2 RES WEIGHT")
#
x1.squeeze().show(ax=ax2)
dls_r.train.decode((x1,))[0].squeeze().show(ax=ax2)
ax2.imshow(A4.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("MODE2 WEIGHT")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
=np.array(A3.to("cpu").detach(),dtype=np.float32)
X2
=torch.Tensor(cv2.resize(X2,(512,512),interpolation=cv2.INTER_LINEAR))
Y2
=(x1*0.2)*Y2-torch.min((x1*0.2)*Y2)
x2
=np.array(A4.to("cpu").detach(),dtype=np.float32)
X22
=torch.Tensor(cv2.resize(X22,(512,512),interpolation=cv2.INTER_LINEAR))
Y22
=(x1*0.2)*Y22-torch.min((x1*0.2)*Y22) x22
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
(x1*0.2).squeeze().show(ax=ax2)   # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x22*4).squeeze().show(ax=ax1)  # MODE2
(x2).squeeze().show(ax=ax2)     # MODE2_res
ax1.set_title("MODE2")
ax2.set_title("MODE2 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
x2 = x2.reshape(1,3,512,512)
net_1.to('cpu')
net_2.to('cpu')
Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Flatten(start_dim=1, end_dim=-1)
(2): Linear(in_features=512, out_features=2, bias=False)
)
camimg2 = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x2).squeeze())
a2, b2 = net_r(x2).tolist()[0]
np.exp(a2)/(np.exp(a2)+np.exp(b2)), np.exp(b2)/(np.exp(a2)+np.exp(b2))
(0.538050281567858, 0.461949718432142)
- By this point the probabilities are nearly tied (0.538 vs 0.462): little class evidence remains in the second residual.
- Overlaying the CAM result on the mode2 res
fig, (ax1, ax2) = plt.subplots(1,2)
#
x2.squeeze().show(ax=ax1)
ax1.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
x2.squeeze().show(ax=ax2)
ax2.imshow(camimg2[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("1ST CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("2ND CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax3.set_title("3RD CAM")
fig.set_figwidth(12)
fig.set_figheight(12)
fig.tight_layout()
Creating mode 3
test2 = camimg2[0] - torch.min(camimg2[0])
A5 = torch.exp(-0.05*(test2))
A6 = 1 - A5
fig, (ax1, ax2) = plt.subplots(1,2)
#
x2.squeeze().show(ax=ax1)
ax1.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
x2.squeeze().show(ax=ax2)
ax2.imshow(camimg2[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
# mode 3 res
X3 = np.array(A5.to("cpu").detach(), dtype=np.float32)
Y3 = torch.Tensor(cv2.resize(X3, (512,512), interpolation=cv2.INTER_LINEAR))
x3 = x2*Y3 - torch.min(x2*Y3)

# mode 3
X32 = np.array(A6.to("cpu").detach(), dtype=np.float32)
Y32 = torch.Tensor(cv2.resize(X32, (512,512), interpolation=cv2.INTER_LINEAR))
x32 = x2*Y32 - torch.min(x2*Y32)
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
(x1*0.2).squeeze().show(ax=ax2)   # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x22*4).squeeze().show(ax=ax1)  # MODE2
(x2).squeeze().show(ax=ax2)     # MODE2_res
ax1.set_title("MODE2")
ax2.set_title("MODE2 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x32*8).squeeze().show(ax=ax1)  # MODE3
(x3).squeeze().show(ax=ax2)     # MODE3_res
ax1.set_title("MODE3")
ax2.set_title("MODE3 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
ax1.set_title("MODE1")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3 + x22*4).squeeze().show(ax=ax1)  # MODE1+MODE2
ax1.set_title("MODE1+MODE2")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3 + x22*4 + x32*2).squeeze().show(ax=ax1)  # MODE1+MODE2+MODE3
ax1.set_title("MODE1+MODE2+MODE3")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
DOG
x, = first(dls_r.test_dl([PILImage.create(get_image_files(path_r)[12])]))
camimg = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x).squeeze())
ebayesthresh = importr('EbayesThresh').ebayesthresh

power_threshed = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
ybar_threshed = np.where(power_threshed>2000, torch.tensor(camimg[0].detach().reshape(-1)), 0)
ybar_threshed = torch.tensor(ybar_threshed.reshape(16,16))

power_threshed2 = np.array(ebayesthresh(FloatVector(torch.tensor(camimg[1].detach().reshape(-1))**2)))
ybar_threshed2 = np.where(power_threshed2>1500, torch.tensor(camimg[1].detach().reshape(-1)), 0)
ybar_threshed2 = torch.tensor(ybar_threshed2.reshape(16,16))
UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
power_threshed=np.array(ebayesthresh(FloatVector(torch.tensor(camimg[0].detach().reshape(-1))**2)))
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("Input image")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow((ybar_threshed).to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("CAT PART")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow((ybar_threshed2).to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax3.set_title("DOG PART")
#
fig.set_figwidth(12)
fig.set_figheight(12)
fig.tight_layout()
- The stronger the evidence for the decision, the more the color shifts from blue toward purple.
a, b = net_r(x).tolist()[0]
np.exp(a) / (np.exp(a)+np.exp(b)), np.exp(b) / (np.exp(a)+np.exp(b))
(6.840020369054416e-09, 0.9999999931599796)
mode 1
# test = camimg_o[0] - torch.min(camimg_o[0])
A1 = torch.exp(-0.05*(ybar_threshed2))
A2 = 1 - A1
fig, (ax1, ax2) = plt.subplots(1,2)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(A2.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("MODE1 WEIGHT")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(A1.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("MODE1 RES WEIGHT")
#
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
# mode 1 res
X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
Y1 = torch.Tensor(cv2.resize(X1, (512,512), interpolation=cv2.INTER_LINEAR))
x1 = x.squeeze().to('cpu')*Y1 - torch.min(x.squeeze().to('cpu'))*Y1

# mode 1
X12 = np.array(A2.to("cpu").detach(), dtype=np.float32)
Y12 = torch.Tensor(cv2.resize(X12, (512,512), interpolation=cv2.INTER_LINEAR))
x12 = x.squeeze().to('cpu')*Y12 - torch.min(x.squeeze().to('cpu'))*Y12
- Separating the 1st CAM
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
(x1*0.3).squeeze().show(ax=ax2)   # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
x1 = x1.reshape(1,3,512,512)
net_1.to('cpu')
net_2.to('cpu')
Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Flatten(start_dim=1, end_dim=-1)
(2): Linear(in_features=512, out_features=2, bias=False)
)
camimg1 = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x1).squeeze())
a1, b1 = net_r(x1).tolist()[0]
np.exp(a1) / (np.exp(a1)+np.exp(b1)), np.exp(b1) / (np.exp(a1)+np.exp(b1))
(0.001180554413474461, 0.9988194455865255)
- mode1 res
fig, (ax1, ax2) = plt.subplots(1,2)
#
(x1*0.25).squeeze().show(ax=ax1)
ax1.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
(x1*0.25).squeeze().show(ax=ax2)
ax2.imshow(camimg1[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
- Comparison with the first CAM result
fig, (ax1, ax2) = plt.subplots(1,2)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("1ST CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("2ND CAM")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
- Separating the 2nd CAM
test1 = camimg1[1] - torch.min(camimg1[1])
A3 = torch.exp(-0.05*(test1))
A4 = 1 - A3
fig, (ax1, ax2) = plt.subplots(1,2)
#
x1.squeeze().show(ax=ax2)
dls_r.train.decode((x1,))[0].squeeze().show(ax=ax1)
ax1.imshow(A3.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("MODE2 RES WEIGHT")
#
x1.squeeze().show(ax=ax2)
dls_r.train.decode((x1,))[0].squeeze().show(ax=ax2)
ax2.imshow(A4.data.to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("MODE2 WEIGHT")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# mode 2 res
X2 = np.array(A3.to("cpu").detach(), dtype=np.float32)
Y2 = torch.Tensor(cv2.resize(X2, (512,512), interpolation=cv2.INTER_LINEAR))
x2 = (x1*0.2)*Y2 - torch.min((x1*0.2)*Y2)

# mode 2
X22 = np.array(A4.to("cpu").detach(), dtype=np.float32)
Y22 = torch.Tensor(cv2.resize(X22, (512,512), interpolation=cv2.INTER_LINEAR))
x22 = (x1*0.2)*Y22 - torch.min((x1*0.2)*Y22)
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
(x1*0.2).squeeze().show(ax=ax2)   # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x22*3).squeeze().show(ax=ax1)  # MODE2
(x2).squeeze().show(ax=ax2)     # MODE2_res
ax1.set_title("MODE2")
ax2.set_title("MODE2 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
x2 = x2.reshape(1,3,512,512)
net_1.to('cpu')
net_2.to('cpu')
Sequential(
(0): AdaptiveAvgPool2d(output_size=1)
(1): Flatten(start_dim=1, end_dim=-1)
(2): Linear(in_features=512, out_features=2, bias=False)
)
camimg2 = torch.einsum('ij,jkl -> ikl', net_2[2].weight, net_1(x2).squeeze())
a2, b2 = net_r(x2).tolist()[0]
np.exp(a2)/(np.exp(a2)+np.exp(b2)), np.exp(b2)/(np.exp(a2)+np.exp(b2))
(0.48014948791345896, 0.5198505120865412)
- Again the probabilities are nearly tied (0.480 vs 0.520): little class evidence remains in the second residual.
- Overlaying the CAM result on the mode2 res
fig, (ax1, ax2) = plt.subplots(1,2)
#
x2.squeeze().show(ax=ax1)
ax1.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
x2.squeeze().show(ax=ax2)
ax2.imshow(camimg2[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("1ST CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("2ND CAM")
#
dls_r.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax3.set_title("3RD CAM")
fig.set_figwidth(12)
fig.set_figheight(12)
fig.tight_layout()
Creating mode 3 — it no longer seems to separate anything further
test2 = camimg2[1] - torch.min(camimg2[1])
A5 = torch.exp(-0.05*(test2))
A6 = 1 - A5
fig, (ax1, ax2) = plt.subplots(1,2)
#
x2.squeeze().show(ax=ax1)
ax1.imshow(camimg2[0].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax1.set_title("CAT PART")
#
x2.squeeze().show(ax=ax2)
ax2.imshow(camimg2[1].to("cpu").detach(), alpha=0.5, extent=(0,511,511,0), interpolation='bilinear', cmap='cool')
ax2.set_title("DOG PART")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
# mode 3 res
X3 = np.array(A5.to("cpu").detach(), dtype=np.float32)
Y3 = torch.Tensor(cv2.resize(X3, (512,512), interpolation=cv2.INTER_LINEAR))
x3 = x2*Y3 - torch.min(x2*Y3)

# mode 3
X32 = np.array(A6.to("cpu").detach(), dtype=np.float32)
Y32 = torch.Tensor(cv2.resize(X32, (512,512), interpolation=cv2.INTER_LINEAR))
x32 = x2*Y32 - torch.min(x2*Y32)
fig, (ax1) = plt.subplots(1,1)
dls_r.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.set_title("ORIGINAL")
fig.set_figwidth(4)
fig.set_figheight(4)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
(x1*0.2).squeeze().show(ax=ax2)   # MODE1_res
ax1.set_title("MODE1")
ax2.set_title("MODE1 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x22*4).squeeze().show(ax=ax1)  # MODE2
(x2).squeeze().show(ax=ax2)     # MODE2_res
ax1.set_title("MODE2")
ax2.set_title("MODE2 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
#
fig, (ax1, ax2) = plt.subplots(1,2)
(x32*8).squeeze().show(ax=ax1)  # MODE3
(x3).squeeze().show(ax=ax2)     # MODE3_res
ax1.set_title("MODE3")
ax2.set_title("MODE3 RES")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3).squeeze().show(ax=ax1)  # MODE1
ax1.set_title("MODE1")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3 + x22*4).squeeze().show(ax=ax1)  # MODE1+MODE2
ax1.set_title("MODE1+MODE2")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
fig, (ax1) = plt.subplots(1,1)
(x12*0.3 + x22*4 + x32*2).squeeze().show(ax=ax1)  # MODE1+MODE2+MODE3
ax1.set_title("MODE1+MODE2+MODE3")
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).