diff --git a/FilterEvaluator/Evaluator copy.py b/FilterEvaluator/Evaluator copy.py
new file mode 100644
index 0000000..a25d007
--- /dev/null
+++ b/FilterEvaluator/Evaluator copy.py
@@ -0,0 +1,113 @@
+from __future__ import print_function
+import os
+import sys
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import torchvision
+from torchvision import datasets, transforms
+import torchvision.models as models
+import matplotlib.pyplot as plt
+import numpy as np
+from torch.utils.data import Dataset, DataLoader
+from PIL import Image
+import random
+import cv2
+
+
+CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
+print("Current Path :" + CurrentPath)
+
+sys.path.append(CurrentPath+'../tools')
+sys.path.append(CurrentPath+'../')
+
+import Model as Model
+from tools import utils, Train, Loader
+import EvaluatorUnsuper
+
+
+batchsize = 128
+
+
+# model = utils.SetDevice(Model.Net5Grad35())
+# model = utils.SetDevice(Model.Net31535())
+model = utils.SetDevice(Model.Net3Grad335())
+# model = utils.SetDevice(Model.Net3())
+
+
+layers = model.PrintLayer()
+layer = 0
+
+model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
+
+weight = np.load(CurrentPath+"WeightSearch.npy")
+
+
+# traindata, testdata = Loader.MNIST(batchsize)
+# traindata, testdata = Loader.RandomMnist(batchsize, style="Vertical")
+# traindata, testdata = Loader.RandomMnist(batchsize, style="Horizontal")
+# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
+# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
+# traindata, testdata = Loader.Cifar10Mono(batchsize)
+traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=True)
+
+
+# weight = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=8, Interation=10)
+# np.save("WeightSearch.npy", weight)
+
+# weight = EvaluatorUnsuper.UnsuperLearnTrainWeight(model, layer, traindata)
+# np.save("WeightTrain.npy", weight)
+
+
+weight = np.load(CurrentPath+"WeightSearch.npy")
+
+
+def DistanceOfKernel(k1,k2):
+    # Compare the ratio structure of two flattened kernels: entry (i,j) of
+    # each outer product is k[j]/k[i], so scalar multiples score near zero.
+    k1 = k1.reshape((1,-1))
+    k1r = 1.0/k1.reshape((-1,1))
+    k1dot = np.dot(k1r,k1)
+    k2 = k2.reshape((1,-1))
+    k2r = 1.0/k2.reshape((-1,1))
+    k2dot = np.dot(k2r,k2)
+    diff = np.abs(np.mean(k1dot - k2dot))
+    return diff
+
+
+indexs = np.random.randint(0,weight.shape[0],(10000,2))
+
+maxdis = 0
+maxindex = []
+for i in indexs:
+    if i[0] == i[1]:
+        continue
+    dis = DistanceOfKernel(weight[i[0]], weight[i[1]])
+    if dis > maxdis:
+        maxindex = i
+        maxdis = dis
+
+a = weight[maxindex[0]]
+b = weight[maxindex[1]]
+
+utils.NumpyToImage(a, CurrentPath+"image","a")
+utils.NumpyToImage(b, CurrentPath+"image","b")
+
+a = 0
+
+
+# utils.NumpyToImage(weight, CurrentPath+"image")
+# utils.SaveModel(model,CurrentPath+"/checkpoint.pkl")
+print("save model success")
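Note on the pairwise scan in the new file above: DistanceOfKernel compares the ratio structure of two flattened kernels, and the loop keeps the most distant pair out of 10000 random index pairs, then renders both kernels to images. A self-contained sketch of the same computation, using illustrative names and random stand-in data rather than the repo's WeightSearch.npy:

import numpy as np

def kernel_distance(k1, k2):
    # Outer product of reciprocals with values: entry (i, j) = k[j] / k[i].
    # Kernels that are scalar multiples share this structure, so distance ~0.
    r1 = np.dot(1.0 / k1.reshape(-1, 1), k1.reshape(1, -1))
    r2 = np.dot(1.0 / k2.reshape(-1, 1), k2.reshape(1, -1))
    return np.abs(np.mean(r1 - r2))

rng = np.random.default_rng(0)
kernels = rng.uniform(-1.0, 1.0, (64, 3, 3))          # stand-in kernel bank
pairs = rng.integers(0, kernels.shape[0], (1000, 2))  # random candidate pairs
most_distant = max((p for p in pairs if p[0] != p[1]),
                   key=lambda p: kernel_distance(kernels[p[0]], kernels[p[1]]))

Because of the reciprocal, the measure is undefined for kernels containing zero-valued weights; the uniformly sampled weights used here (and in the script) are nonzero almost surely.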
diff --git a/FilterEvaluator/Evaluator.py b/FilterEvaluator/Evaluator.py
index 5797e73..55a2490 100644
--- a/FilterEvaluator/Evaluator.py
+++ b/FilterEvaluator/Evaluator.py
@@ -1,10 +1,6 @@
 from __future__ import print_function
 import os
 import sys
-
-# import multiprocessing
-# multiprocessing.set_start_method('spawn', True)
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -20,6 +16,7 @@ import random
 import cv2
 
 
+
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
 print("Current Path :" + CurrentPath)
 
@@ -28,6 +25,9 @@ sys.path.append(CurrentPath+'../')
 
 import Model as Model
 from tools import utils, Train, Loader
+import EvaluatorUnsuper
+
+
 
 batchsize = 128
 
@@ -40,8 +40,7 @@ model = utils.SetDevice(Model.Net3Grad335())
 
 layers = model.PrintLayer()
 layer = 0
-
-model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
+# model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
 
 
@@ -53,132 +52,43 @@ model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
 # traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
 # traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
 # traindata, testdata = Loader.Cifar10Mono(batchsize)
-traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=2, shuffle=True)
-
-
-
-def GetScore(netmodel,layer,SearchLayer,DataSet,Interation=-1):
-    netmodel.eval()
-    sample = utils.SetDevice(torch.empty((SearchLayer.out_channels,0)))
-
-    layer = layer-1
-
-    # layerout = []
-    # layerint = []
-    # def getnet(self, input, output):
-    #     layerout.append(output)
-    #     layerint.append(input)
-    # handle = netmodel.features[layer].register_forward_hook(getnet)
-    # netmodel.ForwardLayer(data,layer)
-    # output = layerout[0][:,:,:,:]
-    # handle.remove()
-
-
-    for batch_idx, (data, target) in enumerate(DataSet):
-        data = utils.SetDevice(data)
-        target = utils.SetDevice(target)
-        output = netmodel.ForwardLayer(data,layer)
-        output = SearchLayer(output)
-        data.detach()
-        target.detach()
-
-        output = torch.reshape(output.transpose(0,1),(SearchLayer.out_channels,-1))
-        sample = torch.cat((sample,output),1)
-        if Interation > 0 and batch_idx >= (Interation-1):
-            break
-
-    sample_mean=torch.mean(sample,dim=1,keepdim=True)
-    dat1 = torch.mean(torch.abs(sample - sample_mean),dim=1,keepdim=True)
-    dat2 = (sample - sample_mean)/dat1
-    dat2 = torch.mean(dat2 * dat2,dim=1)
-    return dat2.cpu().detach().numpy()
-
-def UnsuperLearnSearchWeight(model, layer, dataloader, NumSearch=10000, SaveChannelRatio=500, SearchChannelRatio=1, Interation=10):
-    tl = model.features[layer]
-    newlayer = nn.Conv2d(tl.in_channels, tl.out_channels * SearchChannelRatio, tl.kernel_size,
-                         tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
-    newlayer = utils.SetDevice(newlayer)
-
-    newchannels = tl.out_channels * SaveChannelRatio
-    newweightshape = list(newlayer.weight.data.shape)
-
-    minactive = np.empty((0))
-    minweight = np.empty([0,newweightshape[1],newweightshape[2],newweightshape[3]])
-
-    for i in range(NumSearch):
-        newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
-        newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
-
-        score = GetScore(model, layer, newlayer, dataloader, Interation)
-
-        minactive = np.append(minactive, score)
-        minweight = np.concatenate((minweight, newweight))
-
-        index = minactive.argsort()
-        minactive = minactive[index[0:newchannels]]
-        minweight = minweight[index[0:newchannels]]
-        print("search random :" + str(i))
-        if i % (NumSearch/10) == 0:
-            tl.data=utils.SetDevice(torch.from_numpy(minweight[0:tl.out_channels]))
-            utils.SaveModel(model, CurrentPath+"/checkpoint.pkl")
-
-    tl.data=utils.SetDevice(torch.from_numpy(minweight[0:tl.out_channels]))
-    return minweight
-
-
-def TrainLayer(netmodel, layer, SearchLayer, DataSet, Epoch=100):
-    netmodel.eval()
-    sample = utils.SetDevice(torch.empty((SearchLayer.out_channels,0)))
-    layer = layer-1
-    SearchLayer.weight.data.requires_grad=True
-    optimizer = optim.SGD(SearchLayer.parameters(), lr=0.1)
-    for e in range(Epoch):
-        for batch_idx, (data, target) in enumerate(DataSet):
-            optimizer.zero_grad()
-            data = utils.SetDevice(data)
-            target = utils.SetDevice(target)
-            output = netmodel.ForwardLayer(data,layer)
-            output = SearchLayer(output)
-            sample = torch.reshape(output.transpose(0,1),(SearchLayer.out_channels,-1))
-            sample_mean=torch.mean(sample,dim=1,keepdim=True)
-            dat1 = torch.mean(torch.abs(sample - sample_mean),dim=1,keepdim=True)
-            dat2 = (sample - sample_mean)/dat1
-            dat2 = torch.mean(dat2 * dat2,dim=1)
-            label = dat2*0.5
-            loss = F.l1_loss(dat2, label)
-            loss.backward()
-            optimizer.step()
-        print(" epoch :" + str(e))
-    return SearchLayer.weight.data.cpu().detach().numpy()
-
-def UnsuperLearnTrainWeight(model, layer, dataloader, NumTrain=500, TrainChannelRatio=1, Epoch=20):
-    tl = model.features[layer]
-    newlayer = nn.Conv2d(tl.in_channels, tl.out_channels * TrainChannelRatio, tl.kernel_size,
-                         tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
-    newlayer = utils.SetDevice(newlayer)
-    newweightshape = list(newlayer.weight.data.shape)
-    minactive = np.empty((0))
-    minweight = np.empty([0,newweightshape[1],newweightshape[2],newweightshape[3]])
-    for i in range(NumTrain):
-        newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
-        newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
-        newweight = TrainLayer(model, layer, newlayer, dataloader, Epoch)
-        minweight = np.concatenate((minweight, newweight))
-        print("search :" + str(i))
-    return minweight
+traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=True)
 
-
-# weight = UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=8, Interation=10)
+# weight = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=8, Interation=10)
 # np.save("WeightSearch.npy", weight)
 
-weight = UnsuperLearnTrainWeight(model, layer, traindata)
-np.save("WeightTrain.npy", weight)
+# weight = EvaluatorUnsuper.UnsuperLearnTrainWeight(model, layer, traindata)
+# np.save("WeightTrain.npy", weight)
 
-utils.NumpyToImage(weight, CurrentPath+"image")
+
+# weight = np.load(CurrentPath+"WeightSearch.npy")
+# bestweight = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,128,100000)
+# np.save(CurrentPath+"bestweightSearch.npy", bestweight)
+# utils.NumpyToImage(bestweight, CurrentPath+"image")
+
+
+# weight = np.load(CurrentPath+"WeightTrain.npy")
+# bestweight = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,128,100000)
+# np.save(CurrentPath+"bestweightTrain.npy", bestweight)
+# utils.NumpyToImage(bestweight, CurrentPath+"image")
+
+
+
+weight = np.load(CurrentPath+"bestweightSearch.npy")
+EvaluatorUnsuper.SetModelConvWeight(model,layer,weight)
+utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
+
+weight = np.load(CurrentPath+"bestweightTrain.npy")
+EvaluatorUnsuper.SetModelConvWeight(model,layer,weight)
+utils.SaveModel(model,CurrentPath+"/checkpointTrain.pkl")
+
+
+
+# utils.NumpyToImage(weight, CurrentPath+"image")
 # utils.SaveModel(model,CurrentPath+"/checkpoint.pkl")
 print("save model sucess")
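With the search and training helpers moved out into EvaluatorUnsuper, Evaluator.py is now just the driver: load a pre-selected kernel bank, write it into the first conv layer, and save one checkpoint per selection method. A hedged sketch of how the two resulting checkpoints could be compared downstream, reusing only calls that appear elsewhere in this diff and assuming Train.test returns the test metric, as its use with window.AppendData in TrainNetwork.py suggests:

for ckpt in ("checkpointSearch.pkl", "checkpointTrain.pkl"):
    m = utils.SetDevice(Model.Net3Grad335())
    m = utils.LoadModel(m, CurrentPath + ckpt)
    print(ckpt, Train.test(m, testdata))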
diff --git a/FilterEvaluator/EvaluatorEntropy.py b/FilterEvaluator/EvaluatorUnsuper.py
similarity index 74%
rename from FilterEvaluator/EvaluatorEntropy.py
rename to FilterEvaluator/EvaluatorUnsuper.py
index 4819cab..3235ecc 100644
--- a/FilterEvaluator/EvaluatorEntropy.py
+++ b/FilterEvaluator/EvaluatorUnsuper.py
@@ -1,10 +1,6 @@
 from __future__ import print_function
 import os
 import sys
-
-# import multiprocessing
-# multiprocessing.set_start_method('spawn', True)
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -19,50 +15,13 @@ from PIL import Image
 import random
 import cv2
-
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
-print("Current Path :" + CurrentPath)
-
 sys.path.append(CurrentPath+'../tools')
 sys.path.append(CurrentPath+'../')
 
 import Model as Model
 from tools import utils, Train, Loader
-batchsize = 128
-
-
-# model = utils.SetDevice(Model.Net5Grad35())
-# model = utils.SetDevice(Model.Net31535())
-model = utils.SetDevice(Model.Net3Grad335())
-# model = utils.SetDevice(Model.Net3())
-
-
-layers = model.PrintLayer()
-layer = 0
-
-model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
-
-
-
-
-
-
-# traindata, testdata = Loader.MNIST(batchsize)
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Vertical")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Horizontal")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
-# traindata, testdata = Loader.Cifar10Mono(batchsize)
-traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=2, shuffle=True)
-
-
-
-
-
 
 def GetScore(netmodel,layer,SearchLayer,DataSet,Interation=-1):
     netmodel.eval()
@@ -175,35 +134,49 @@ def UnsuperLearnTrainWeight(model, layer, dataloader, NumTrain=500, TrainChannel
     return minweight
 
 
+def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128,interation=10000):
+    weight = weight.astype("float32")
+    netmodel.eval()
+    tl = netmodel.features[layer]
+    outchannels = tl.out_channels
+    newlayer = nn.Conv2d(tl.in_channels, tl.out_channels, tl.kernel_size,
+                         tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
+    newlayer = utils.SetDevice(newlayer)
+
+    datas = []
+    for batch_idx, (data, target) in enumerate(dataloader):
+        datas.append(utils.SetDevice(data))
+        if batch_idx >= databatchs-1:
+            break
+    indexs = np.random.randint(0,weight.shape[0],interation*outchannels).reshape(interation,-1)
+    entropys = []
+    forwardlayer = layer -1
+    shift = []
+    for i in range(outchannels):
+        shift.append(1<<i)
+    shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
+    layerout = torch.cat([netmodel.ForwardLayer(d,forwardlayer) for d in datas])
+    for i in range(interation):
+        newlayer.weight.data = utils.SetDevice(torch.from_numpy(weight[indexs[i]]))
+        output = newlayer(layerout)
+        output = torch.reshape(output.transpose(0,1),(outchannels,-1)).transpose(0,1)
+        mean = output.mean(0,keepdim=True)
+        posi = output > mean
+        posi = posi*shift
+        sums = posi.sum(1).type(torch.float32)
+        histc = sums.histc(256,0,255).type(torch.float32)
+        histc = histc[histc>0]
+        histc = histc/histc.sum()
+        entropys.append((histc.log2()*histc).sum())
+        print("search index : " + str(i))
+    argmin = np.argmin(entropys)
+    bestweight = weight[indexs[argmin]]
+    return bestweight
 
-
-
-
-
-weight = np.load(CurrentPath+"WeightSearch.npy")
-# weight = np.zeros((990,3,3));
-# weight[:,1]=100
-# weight[:,:,1]=100
-utils.NumpyToImage(weight,CurrentPath+"image")
-
-a=0
-
-
-
-
-
-
-
-
-# weight = UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=8, Interation=10)
-# np.save("WeightSearch.npy", weight)
-
-weight = UnsuperLearnTrainWeight(model, layer, traindata)
-np.save("WeightTrain.npy", weight)
-
-
-
-ConvKernelToImage(weight, CurrentPath+"image")
-# utils.SaveModel(model,CurrentPath+"/checkpoint.pkl")
-print("save model sucess")
+def SetModelConvWeight(model, layer, weight):
+    w = utils.SetDevice(torch.from_numpy(weight))
+    model.features[layer].weight.data = w
\ No newline at end of file
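The new UnsuperLearnFindBestWeight scores each candidate set of out_channels kernels by binarizing every channel's response against its mean, packing the per-position bits into one byte code via the powers of two in shift, and taking the entropy of the resulting code histogram. Since each entry of entropys is sum(p*log2(p)), i.e. the negative entropy -H, np.argmin picks the kernel set whose binarized responses carry the most entropy. A standalone sketch of just the scoring step, with illustrative shapes and at most 8 channels so the codes fit the 256-bin histogram:

import torch

def response_entropy(output):
    # output: (N, C) responses of C candidate kernels at N positions, C <= 8.
    c = output.shape[1]
    shift = torch.tensor([1 << i for i in range(c)], dtype=torch.float32)
    posi = (output > output.mean(0, keepdim=True)).float()  # binarize per channel
    codes = (posi * shift).sum(1)                           # pack bits into 0..255
    histc = codes.histc(256, 0, 255)
    p = histc[histc > 0] / histc.sum()
    return (p.log2() * p).sum()        # -H: lower value = higher entropy

scores = torch.stack([response_entropy(torch.randn(4096, 8)) for _ in range(4)])
best = int(scores.argmin())            # index of the highest-entropy candidate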
diff --git a/FilterEvaluator/TrainNetwork.py b/FilterEvaluator/TrainNetwork.py
index 1d0e74a..ab7c68b 100644
--- a/FilterEvaluator/TrainNetwork.py
+++ b/FilterEvaluator/TrainNetwork.py
@@ -66,6 +66,8 @@ for i in range(1000):
     window.AppendData(linePretrain,Train.test(model,testdata))
 
 
+
+
 model = utils.SetDevice(Model.Net3335())
 model = utils.LoadModel(model, CurrentPath+"/checkpoint.pkl")
diff --git a/FilterEvaluator/bestweightSearch.npy b/FilterEvaluator/bestweightSearch.npy
new file mode 100644
index 0000000..70a6fe5
Binary files /dev/null and b/FilterEvaluator/bestweightSearch.npy differ
diff --git a/FilterEvaluator/bestweightTrain.npy b/FilterEvaluator/bestweightTrain.npy
new file mode 100644
index 0000000..c1aa5ac
Binary files /dev/null and b/FilterEvaluator/bestweightTrain.npy differ
diff --git a/FilterEvaluator/checkpoint.pkl b/FilterEvaluator/checkpoint.pkl
deleted file mode 100644
index 02d3147..0000000
Binary files a/FilterEvaluator/checkpoint.pkl and /dev/null differ
diff --git a/FilterEvaluator/checkpointSGD.pkl b/FilterEvaluator/checkpointSGD.pkl
deleted file mode 100644
index 7a9b4a3..0000000
Binary files a/FilterEvaluator/checkpointSGD.pkl and /dev/null differ
diff --git a/FilterEvaluator/checkpointSGDPreTrain.pkl b/FilterEvaluator/checkpointSGDPreTrain.pkl
deleted file mode 100644
index 54990fc..0000000
Binary files a/FilterEvaluator/checkpointSGDPreTrain.pkl and /dev/null differ
diff --git a/FilterEvaluator/checkpointSearch.pkl b/FilterEvaluator/checkpointSearch.pkl
new file mode 100644
index 0000000..2cd5e03
Binary files /dev/null and b/FilterEvaluator/checkpointSearch.pkl differ
diff --git a/FilterEvaluator/checkpointTrain.pkl b/FilterEvaluator/checkpointTrain.pkl
new file mode 100644
index 0000000..7bdf15b
Binary files /dev/null and b/FilterEvaluator/checkpointTrain.pkl differ
diff --git a/FilterEvaluator/image/a0-1024.png b/FilterEvaluator/image/a0-1024.png
new file mode 100644
index 0000000..e8561b2
Binary files /dev/null and b/FilterEvaluator/image/a0-1024.png differ
diff --git a/FilterEvaluator/image/b0-1024.png b/FilterEvaluator/image/b0-1024.png
new file mode 100644
index 0000000..d3d1b05
Binary files /dev/null and b/FilterEvaluator/image/b0-1024.png differ
diff --git a/FilterEvaluator/image/0-1024.png b/FilterEvaluator/imageSearch/0-1024.png
similarity index 100%
rename from FilterEvaluator/image/0-1024.png
rename to FilterEvaluator/imageSearch/0-1024.png
diff --git a/FilterEvaluator/image/1024-2048.png b/FilterEvaluator/imageSearch/1024-2048.png
similarity index 100%
rename from FilterEvaluator/image/1024-2048.png
rename to FilterEvaluator/imageSearch/1024-2048.png
diff --git a/FilterEvaluator/image/2048-3072.png b/FilterEvaluator/imageSearch/2048-3072.png
similarity index 100%
rename from FilterEvaluator/image/2048-3072.png
rename to FilterEvaluator/imageSearch/2048-3072.png
diff --git a/FilterEvaluator/image/3072-4096.png b/FilterEvaluator/imageSearch/3072-4096.png
similarity index 100%
rename from FilterEvaluator/image/3072-4096.png
rename to FilterEvaluator/imageSearch/3072-4096.png
diff --git a/FilterEvaluator/imageTrain/0-1024.png b/FilterEvaluator/imageTrain/0-1024.png
new file mode 100644
index 0000000..620c960
Binary files /dev/null and b/FilterEvaluator/imageTrain/0-1024.png differ
diff --git a/FilterEvaluator/imageTrain/1024-2048.png b/FilterEvaluator/imageTrain/1024-2048.png
new file mode 100644
index 0000000..543db35
Binary files /dev/null and b/FilterEvaluator/imageTrain/1024-2048.png differ
diff --git a/FilterEvaluator/imageTrain/2048-3072.png b/FilterEvaluator/imageTrain/2048-3072.png
new file mode 100644
index 0000000..40c0366
Binary files /dev/null and b/FilterEvaluator/imageTrain/2048-3072.png differ
diff --git a/FilterEvaluator/imageTrain/3072-4096.png b/FilterEvaluator/imageTrain/3072-4096.png
new file mode 100644
index 0000000..212431c
Binary files /dev/null and b/FilterEvaluator/imageTrain/3072-4096.png differ
diff --git a/tools/utils.py b/tools/utils.py
index f737794..88c62d8 100755
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -9,6 +9,7 @@ from easydict import EasyDict as edict
 import yaml
 import numpy as np
 import argparse
+import cv2
 
 class AverageMeter(object):
     """ Computes ans stores the average and current value"""
@@ -103,7 +104,7 @@ def SaveModel(model , filename='checkpoint_'+str(time.time()) + '.pkl'):
         # 'loss': loss,
     }, filename)
 def LoadModel(model, filename):
-    checkpoint = torch.load(filename)
+    checkpoint = torch.load(filename, map_location="cpu")
     model.load_state_dict(checkpoint['model_state_dict'])
     # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
     # epoch = checkpoint['epoch']
@@ -164,7 +165,8 @@ def ConvKernelToImage(model, layer, foldname):
         d = d.astype(int)
         cv2.imwrite(foldname+"/"+str(i)+".png", d)
 
-def NumpyToImage(numpydate, foldname ,maxImageWidth = 128,maxImageHeight = 128):
+
+def NumpyToImage(numpydate, foldname, title="", maxImageWidth=128, maxImageHeight=128):
     if not os.path.exists(foldname):
         os.mkdir(foldname)
     numpydatemin = np.min(numpydate)
@@ -187,5 +189,5 @@ def NumpyToImage(numpydate, foldname ,maxImageWidth = 128,maxImageHeight = 128):
         d=np.reshape(d, (imagerows, imagecols, datashape[1], datashape[2]))
         d=np.swapaxes(d, 1, 2)
         d=np.reshape(d, (imagerows*datashape[1],imagecols*datashape[2]))
-        d = d.astype(int)
-        cv2.imwrite(foldname+"/"+str(i)+"-"+str(i+stepimages)+".png", d)
+        d = d.astype("uint8")
+        cv2.imwrite(foldname+"/"+title+str(i)+"-"+str(i+stepimages)+".png", d)
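Two small but load-bearing fixes in tools/utils.py: torch.load(filename, map_location="cpu") lets checkpoints saved on a GPU machine load on CPU-only hosts, and the astype("uint8") cast matters because cv2.imwrite expects 8-bit (or 16-bit) pixel data, while astype(int) yields int64, which OpenCV will not write reliably. A minimal illustration of both, with made-up array contents and file names; the data must already be scaled into 0..255 before the cast, which is what NumpyToImage's normalization handles:

import cv2
import numpy as np
import torch

d = np.random.randn(32, 32)
d = (d - d.min()) / (d.max() - d.min()) * 255.0  # scale into 0..255 first
cv2.imwrite("kernels.png", d.astype("uint8"))    # uint8, not int64

# Loading a CUDA-saved checkpoint on a CPU-only machine.
checkpoint = torch.load("checkpointSearch.pkl", map_location="cpu")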