import torch
from fastai.vision.all import *
import cv2
import numpy as np
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import matplotlib.pyplot as plt
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageFile
from PIL import Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
from torchvision.utils import save_image
https://seoyeonc.github.io/chch/cnn/feature%20extraction/big%20data%20analysis/2022/01/11/bd_9주차.html
https://seoyeonc.github.io/chch/cam/2022/01/10/bd-8주차_1.html
import rpy2
import rpy2.robjects as ro
from rpy2.robjects.vectors import FloatVector
from rpy2.robjects.packages import importr
def label_func(f):
    if f[0].isupper():
        return 'cat'
    else:
        return 'dog'
Original CAM
# os.mkdir("original_pet")
# Resize each pet image to 512x512 and save a copy into original_pet/
# (the random box / text drawing below is commented out, so the saved images are plain resized originals)
for i in range(len(path.ls())):
    img = PILImage.create(get_image_files(path)[i])
    img = img.resize([512, 512], resample=None, box=None, reducing_gap=None)
    (w, h) = (img.shape[0], img.shape[1])
    # a = random.uniform(0, w*0.7)
    # b = random.uniform(0, h*0.9)
    # shape = [(a, b), (a+100, b+50)]
    # font = ImageFont.truetype("DejaVuSans.ttf", round(h*0.08))
    name = str(list(path.ls())[i]).split('/')[-1]
    fname = name.split('.')[-1]
    if name[0].isupper():
        img1 = ImageDraw.Draw(img)
        # img1.rectangle(shape, fill="white", outline="black")
        # ImageDraw.Draw(img).text((a, b), 'CAT', (0,0,0), font=font)
        img.save("original_pet/" + name)
    else:
        img1 = ImageDraw.Draw(img)
        # img1.rectangle(shape, fill="black", outline="black")
        # ImageDraw.Draw(img).text((a, b), 'DOG', (255,255,255), font=font)
        img.save("original_pet/" + name)
path_o = Path('original_pet')  # images with random boxes added
files_o = get_image_files(path_o)
dls_o = ImageDataLoaders.from_name_func(path_o, files_o, label_func, item_tfms=Resize(512))
lrnr_o1 = cnn_learner(dls_o, resnet34, metrics=error_rate)
lrnr_o1.fine_tune(1)
net_o1 = lrnr_o1.model[0]
net_o2 = lrnr_o1.model[1]
net_o2 = torch.nn.Sequential(
    torch.nn.AdaptiveAvgPool2d(output_size=1),
    torch.nn.Flatten(),
    torch.nn.Linear(512, out_features=2, bias=False))
net_o = torch.nn.Sequential(net_o1, net_o2)
lrnr_o2 = Learner(dls_o, net_o, metrics=accuracy)
lrnr_o2.fine_tune(10)
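The stock fastai head (the original lrnr_o1.model[1]) is replaced with a minimal GAP → Flatten → Linear(512, 2, bias=False) head, so the class activation map can later be read directly off the single linear layer's weight matrix. A quick structural check, assuming the definitions above:

print(net_o2)                  # Sequential(AdaptiveAvgPool2d, Flatten, Linear(512 -> 2))
print(net_o2[2].weight.shape)  # torch.Size([2, 512]): one 512-dim weight vector per class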
interp_o = ClassificationInterpretation.from_learner(lrnr_o2)
interp_o.plot_confusion_matrix()
x_o, = first(dls_o.test_dl([PILImage.create(get_image_files(path_o)[7389])]))
camimg_o = torch.einsum('ij,jkl -> ikl', net_o2[2].weight, net_o1(x_o).squeeze())
# code revised by Seoyeon
fig, (ax1, ax2) = plt.subplots(1, 2)
#
dls_o.train.decode((x_o,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg_o[0].to("cpu").detach(), alpha=0.7, extent=(0,511,511,0), interpolation='bilinear', cmap='bone')
#
dls_o.train.decode((x_o,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg_o[1].to("cpu").detach(), alpha=0.7, extent=(0,511,511,0), interpolation='bilinear', cmap='bone')
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
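The einsum above is the usual CAM construction: the 2×512 weight matrix of the final linear layer is contracted with the 512-channel feature map from the convolutional body, giving one activation map per class. A minimal shape check, assuming a 512×512 input to the resnet34 body (which downsamples by a factor of 32, so the feature map is 16×16):

print(net_o2[2].weight.shape)       # torch.Size([2, 512])
print(net_o1(x_o).squeeze().shape)  # torch.Size([512, 16, 16])
print(camimg_o.shape)               # torch.Size([2, 16, 16]): camimg_o[0] = cat map, camimg_o[1] = dog map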
fig, ax = plt.subplots(5, 5)
k = 0
for i in range(5):
    for j in range(5):
        x_o, = first(dls_o.test_dl([PILImage.create(get_image_files(path_o)[k])]))
        camimg_o = torch.einsum('ij,jkl -> ikl', net_o2[2].weight, net_o1(x_o).squeeze())
        a_o, b_o = net_o(x_o).tolist()[0]
        catprob, dogprob = np.exp(a_o)/(np.exp(a_o)+np.exp(b_o)), np.exp(b_o)/(np.exp(a_o)+np.exp(b_o))
        if catprob > dogprob:
            dls_o.train.decode((x_o,))[0].squeeze().show(ax=ax[i][j])
            ax[i][j].imshow(camimg_o[0].to("cpu").detach(), alpha=0.7, extent=(0,512,512,0), interpolation='bilinear', cmap='bone')
            ax[i][j].set_title("cat(%s)" % catprob.round(5))
        else:
            dls_o.train.decode((x_o,))[0].squeeze().show(ax=ax[i][j])
            ax[i][j].imshow(camimg_o[1].to("cpu").detach(), alpha=0.7, extent=(0,512,512,0), interpolation='bilinear', cmap='bone')
            ax[i][j].set_title("dog(%s)" % dogprob.round(5))
        k = k + 1
fig.set_figwidth(16)
fig.set_figheight(16)
fig.tight_layout()
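The cat/dog probabilities in the titles are a two-class softmax computed by hand from the raw logits; the same values can be obtained directly from PyTorch, as in this minimal equivalent sketch:

logits = net_o(x_o)  # shape (1, 2): raw scores for (cat, dog)
catprob, dogprob = torch.softmax(logits, dim=1).squeeze().tolist()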
fig, ax = plt.subplots(5, 5)
k = 0
for i in range(5):
    for j in range(5):
        x_o, = first(dls_o.test_dl([PILImage.create(get_image_files(path_o)[k])]))
        camimg_o = torch.einsum('ij,jkl -> ikl', net_o2[2].weight, net_o1(x_o).squeeze())
        a_o, b_o = net_o(x_o).tolist()[0]
        catprob, dogprob = np.exp(a_o)/(np.exp(a_o)+np.exp(b_o)), np.exp(b_o)/(np.exp(a_o)+np.exp(b_o))
        if catprob > dogprob:
            test = camimg_o[0] - torch.min(camimg_o[0])   # shift the cat CAM so its minimum is 0
            A1 = torch.exp(-0.1*test)                     # high activation -> weight near 0
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512, 512), interpolation=cv2.INTER_LINEAR))
            x1 = x_o.squeeze().to('cpu')*Y1 - torch.min(x_o.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("cat(%s)" % catprob.round(5))
        else:
            test = camimg_o[1] - torch.min(camimg_o[1])   # shift the dog CAM so its minimum is 0
            A1 = torch.exp(-0.1*test)
            X1 = np.array(A1.to("cpu").detach(), dtype=np.float32)
            Y1 = torch.Tensor(cv2.resize(X1, (512, 512), interpolation=cv2.INTER_LINEAR))
            x1 = x_o.squeeze().to('cpu')*Y1 - torch.min(x_o.squeeze().to('cpu'))*Y1
            (x1*0.25).squeeze().show(ax=ax[i][j])
            ax[i][j].set_title("dog(%s)" % dogprob.round(5))
        k = k + 1
fig.set_figwidth(16)
fig.set_figheight(16)
fig.tight_layout()
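What the weighting does: after shifting the selected CAM so its minimum is 0, exp(-0.1·CAM) turns it into a multiplicative weight in (0, 1], so strongly activated regions are dimmed while the background keeps most of its intensity; the 16×16 weight map is upsampled to 512×512 before being applied to the min-shifted image. A roughly equivalent sketch of the resize step using torch.nn.functional.interpolate instead of cv2 (shown only as an alternative, not the author's code):

import torch.nn.functional as F
A1_cpu = A1.to("cpu").detach()[None, None]  # (1, 1, 16, 16)
Y1_alt = F.interpolate(A1_cpu, size=(512, 512), mode='bilinear', align_corners=False)[0, 0]
# Y1_alt plays the same role as Y1 above (values may differ slightly from cv2.resize near the borders)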
Check whether any .mat files are present
for i in range(len(path_o.ls())):
    img = PILImage.create(get_image_files(path_o)[i])
    img = img.resize([512, 512], resample=None, box=None, reducing_gap=None)
    name = str(list(path_o.ls())[i]).split('/')[-1]
    fname = name.split('.')[-1]
    if fname != "jpg":
        print(name)
    else:
        pass
x_o, = first(dls_o.test_dl([PILImage.create(get_image_files(path_o)[1])]))
camimg_o = torch.einsum('ij,jkl -> ikl', net_o2[2].weight, net_o1(x_o).squeeze())
a_o, b_o = net_o(x_o).tolist()[0]
catprob_o, dogprob_o = np.exp(a_o)/(np.exp(a_o)+np.exp(b_o)), np.exp(b_o)/(np.exp(a_o)+np.exp(b_o))
if catprob_o > dogprob_o:
    test_o = camimg_o[0] - torch.min(camimg_o[0])
    A1_o = torch.exp(-0.01*test_o)  # note the gentler decay constant (0.01 here vs 0.1 in the grid above)
    X1_o = np.array(A1_o.to("cpu").detach(), dtype=np.float32)
    Y1_o = torch.Tensor(cv2.resize(X1_o, (512, 512), interpolation=cv2.INTER_LINEAR))
    x1_o = x_o.squeeze().to('cpu')*Y1_o - torch.min(x_o.squeeze().to('cpu'))*Y1_o
    (x1_o*0.25).squeeze().show()
else:
    test_o = camimg_o[1] - torch.min(camimg_o[1])
    A1_o = torch.exp(-0.01*test_o)
    X1_o = np.array(A1_o.to("cpu").detach(), dtype=np.float32)
    Y1_o = torch.Tensor(cv2.resize(X1_o, (512, 512), interpolation=cv2.INTER_LINEAR))
    x1_o = x_o.squeeze().to('cpu')*Y1_o - torch.min(x_o.squeeze().to('cpu'))*Y1_o
    (x1_o*0.25).squeeze().show()
# # reference: saving the result
# np_arr = np.array(tensor, dtype=np.uint8)
# img = PIL.Image.fromarray(np_arr)
# img.save('path')
# name = str(list(path.ls())[1]).split('/')[-1]
# res1=(x1*0.35).squeeze()
# res1.show()
# save_image(res1, "pet3_mode1_res/"+name)
#res1.save("pet3_mode1_res/"+name)