diff --git a/FilterEvaluator/Evaluator.py b/FilterEvaluator/Evaluator.py
index 1a3d4fc..937b9bd 100644
--- a/FilterEvaluator/Evaluator.py
+++ b/FilterEvaluator/Evaluator.py
@@ -44,31 +44,42 @@ model.PrintLayer()
 
-# traindata, testdata = Loader.MNIST(batchsize)
-traindata, testdata = Loader.MNIST(batchsize, resize=7)
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Vertical")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Horizontal")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
-# traindata, testdata = Loader.Cifar10Mono(batchsize)
-# traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=False)
+# traindata, testdata = Loader.MNIST(batchsize, shuffle=True)
+traindata, testdata = Loader.MNIST(batchsize, shuffle=True, resize=7)
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="Vertical")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="Horizontal")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="VerticalOneLine")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="VerticalZebra")
+# traindata, testdata = Loader.Cifar10Mono(batchsize, shuffle=True)
+# traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=False)
 
-# weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=500000, SearchChannelRatio=32, Interation=5)
-# np.save("WeightSearch.npy", weight)
-weight = np.load(CurrentPath+"WeightSearch.npy")
-utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
-weight = np.load(CurrentPath+"WeightSearch.npy")
-# weight = weight[0:256]
-bestweight,index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,32,4000000)
-np.save(CurrentPath+"bestweightSearch.npy", bestweight)
-bestweight = np.load(CurrentPath+"bestweightSearch.npy")
-utils.NumpyToImage(bestweight, CurrentPath+"image",title="SearchWerightBest")
+# weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=500000, SearchChannelRatio=32, Interation=50)
+# np.save("WeightSearch.npy", weight)
+# weight = np.load(CurrentPath+"WeightSearch.npy")
+# utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
+# weight = np.load(CurrentPath+"WeightSearch.npy")
+# # weight = weight[0:256]
+# bestweight,index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,32,20000000)
+# np.save(CurrentPath+"bestweightSearch.npy", bestweight)
+# bestweight = np.load(CurrentPath+"bestweightSearch.npy")
+# utils.NumpyToImage(bestweight, CurrentPath+"image",title="SearchWeightBest")
+# EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
+# utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
+
+
+bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,32,20,250000)
+np.save(CurrentPath+"bestweightEntropySearch.npy", bestweight)
+utils.NumpyToImage(bestweight, CurrentPath+"image",title="EntropySearchWeightBest")
 EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
-utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
+utils.SaveModel(model,CurrentPath+"/checkpointEntropySearch.pkl")
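
The new entropy-search call above uses positional arguments. Mapped onto the signature introduced in EvaluatorUnsuper.py below, an equivalent keyword form (same values, shown only for readability) would be:

    bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(
        model, layer, traindata,
        databatchs=32,      # training batches preloaded onto the device
        stepsize=20,        # random candidate filters drawn per iteration
        interation=250000)  # search iterations ("interation" follows the codebase spelling)
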
diff --git a/FilterEvaluator/EvaluatorUnsuper.py b/FilterEvaluator/EvaluatorUnsuper.py
index 8165b58..964534b 100644
--- a/FilterEvaluator/EvaluatorUnsuper.py
+++ b/FilterEvaluator/EvaluatorUnsuper.py
@@ -15,6 +15,7 @@ from PIL import Image
 import random
 import cv2
 from tqdm import tqdm
+import threading
 
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
@@ -134,7 +135,8 @@ def UnsuperLearnTrainWeight(model, layer, dataloader, NumTrain=500, TrainChannel
         print("search :" + str(i))
     return minweight
 
-def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128,interation=10000):
+# search for the best weight among the given candidate weights
+def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128, interation=10000):
     interationbar = tqdm(total=interation)
     weight = weight.astype("float32")
     netmodel.eval()
@@ -177,6 +179,76 @@ def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=1
     interationbar.close()
     return bestweight,indexs[sortindex]
 
+# search for the best weight over randomly generated candidates
+def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=1000, interation=1000):
+    interationbar = tqdm(total=interation)
+    forwardlayer = layer - 1
+    bestweight = []
+    bestentropy = 100.0
+    netmodel.eval()
+    tl = netmodel.features[layer]
+    outchannels = tl.out_channels
+    newlayer = nn.Conv2d(tl.in_channels, stepsize, tl.kernel_size,
+                         tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
+    newlayer = utils.SetDevice(newlayer)
+
+    # preload training batches to speed up the search.
+    datas = []
+    for batch_idx, (data, target) in enumerate(dataloader):
+        datas.append(utils.SetDevice(data))
+        if batch_idx >= databatchs-1:
+            break
+    # precompute this layer's input once to speed up the search.
+    datasnetout = []
+    for d in datas:
+        datasnetout.append(netmodel.ForwardLayer(d, forwardlayer))
+    datasnetout = torch.cat(datasnetout)
+    # shift constants so each channel contributes one bit to the histc code.
+    shift = []
+    for i in range(outchannels):
+        shift.append(1 << i)
+    shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("float32")))
+
+    for j in range(interation):
+        # NOTE: the candidate-generation lines here were garbled in the
+        # original patch and are reconstructed from context: draw stepsize
+        # random filters, load them into the scratch layer, and group the
+        # responses into random outchannels-sized combinations.
+        newweight = np.random.uniform(-1.0, 1.0, (stepsize, tl.in_channels,
+                tl.kernel_size[0], tl.kernel_size[1])).astype("float32")
+        newlayer.weight.data = utils.SetDevice(torch.from_numpy(newweight))
+        output = newlayer(datasnetout).permute(0, 2, 3, 1).reshape(-1, stepsize)
+        indexs = np.random.randint(0, stepsize, (stepsize, outchannels))
+        output = output[:, indexs]
+        meaned = output.mean(0, keepdim=True)
+        bitted = ((output > meaned)*shift).sum(2).type(torch.float32)
+
+        entropys = []
+        for i in range(len(indexs)):
+            histced = bitted[:,i].histc(256,0,255).type(torch.float32)
+            histced = histced[histced>0]
+            histced = histced/histced.sum()
+            entropy = (histced.log2()*histced).sum()
+            entropys.append(entropy.detach().cpu().numpy())
+
+        argmin = np.argmin(entropys)
+        if entropys[argmin] < bestentropy:
+            bestweight = newweight[indexs[argmin]]
+            bestentropy = entropys[argmin]
+        interationbar.update(1)
+    interationbar.close()
+    return bestweight
+
+
+
 def SetModelConvWeight(model, layer, weight):
     w = utils.SetDevice(torch.from_numpy(weight))
     model.features[layer].weight.data = w
diff --git a/FilterEvaluator/Model.py b/FilterEvaluator/Model.py
index 2454be0..fe17c0a 100644
--- a/FilterEvaluator/Model.py
+++ b/FilterEvaluator/Model.py
@@ -65,8 +65,8 @@ class Net333(UniModule.ModuleBase):
         super(Net333, self).__init__()
         layers = []
         layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 10, kernel_size=3,bias=False)]
+        layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
+        layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
         self.features = nn.Sequential(*layers)
     def forward(self, x):
         x = self.features(x)
@@ -78,8 +78,8 @@ class Net3Grad33(UniModule.ModuleBase):
         super(Net3Grad33, self).__init__()
         layers = []
         layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 10, kernel_size=3,bias=False)]
+        layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
+        layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
         self.features = nn.Sequential(*layers)
         self.SetConvRequiresGrad(0,False)
     def forward(self, x):
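
For reference, a minimal self-contained sketch of the scoring performed inside UnsuperLearnSearchBestWeight, assuming a (samples, channels) response matrix for one candidate group of filters:

    import torch

    def group_score(responses):
        # one bit per channel: does the response exceed its per-filter mean?
        channels = responses.shape[1]
        shift = torch.tensor([1 << i for i in range(channels)], dtype=torch.float32)
        meaned = responses.mean(0, keepdim=True)
        codes = ((responses > meaned).float() * shift).sum(1)  # one byte per sample for 8 channels
        hist = codes.histc(256, 0, 255)
        p = hist[hist > 0]
        p = p / p.sum()
        return float((p.log2() * p).sum())  # = -H(codes)

Note the sign: the stored score is sum(p*log2(p)), the negative of the Shannon entropy, so keeping the minimum across iterations selects the filter group whose binarized response codes are spread most uniformly over the 256 histogram bins.
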
diff --git a/FilterEvaluator/TrainNetwork.py b/FilterEvaluator/TrainNetwork.py
index 1c0a547..f8b42f9 100644
--- a/FilterEvaluator/TrainNetwork.py
+++ b/FilterEvaluator/TrainNetwork.py
@@ -25,7 +25,7 @@ sys.path.append(CurrentPath+'../')
 import Model
 from tools import utils, Train, Loader, WebVisual
-
+import EvaluatorUnsuper
 
 batchsize = 128
@@ -40,7 +40,7 @@ batchsize = 128
 
 # traindata, testdata = Loader.MNIST(batchsize, num_workers=4, trainsize=5000)
-traindata, testdata = Loader.MNIST(batchsize, resize=7, trainsize=5000)
+traindata, testdata = Loader.MNIST(batchsize, resize=7, trainsize=200)
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="Vertical")
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="Horizontal")
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="VerticalOneLine")
@@ -84,15 +84,24 @@ linePretrainTrain = WebVisual.Line(window, "PretrainTrain")
 
-model = utils.SetDevice(Model.Net333())
-optimizer = optim.SGD(model.parameters(), lr=0.1)
-Train.TrainEpochs(model,traindata,optimizer,testdata,1000,15,lineNoPre)
+model = utils.SetDevice(Model.Net333())
+# bestweight = np.ones((model.features[0].weight.data.shape),dtype="float32")
+# EvaluatorUnsuper.SetModelConvWeight(model,0,bestweight)
+optimizer = optim.SGD(model.parameters(), lr=0.1)
+Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,lineNoPre)
+
+
+# model = utils.SetDevice(Model.Net3Grad33())
+# model = utils.LoadModel(model, CurrentPath+"/checkpointSearch.pkl")
+# optimizer = optim.SGD(model.parameters(), lr=0.1)
+# Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,linePretrainSearch)
+
 
 model = utils.SetDevice(Model.Net3Grad33())
-model = utils.LoadModel(model, CurrentPath+"/checkpointSearch.pkl")
+model = utils.LoadModel(model, CurrentPath+"/checkpointEntropySearch.pkl")
 optimizer = optim.SGD(model.parameters(), lr=0.1)
-Train.TrainEpochs(model,traindata,optimizer,testdata,1000,15,linePretrainSearch)
+Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,linePretrainSearch)
diff --git a/FilterEvaluator/WeightSearch.npy b/FilterEvaluator/WeightSearch.npy
index 245ae5e..8c9f8fe 100644
Binary files a/FilterEvaluator/WeightSearch.npy and b/FilterEvaluator/WeightSearch.npy differ
diff --git a/FilterEvaluator/bestweightEntropySearch.npy b/FilterEvaluator/bestweightEntropySearch.npy
new file mode 100644
index 0000000..5f64aa9
Binary files /dev/null and b/FilterEvaluator/bestweightEntropySearch.npy differ
diff --git a/FilterEvaluator/bestweightSearch.npy b/FilterEvaluator/bestweightSearch.npy
index 184cad7..24a5c24 100644
Binary files a/FilterEvaluator/bestweightSearch.npy and b/FilterEvaluator/bestweightSearch.npy differ
diff --git a/FilterEvaluator/checkpoint.pkl b/FilterEvaluator/checkpoint.pkl
index a9b6da9..a87e062 100644
Binary files a/FilterEvaluator/checkpoint.pkl and b/FilterEvaluator/checkpoint.pkl differ
diff --git a/FilterEvaluator/checkpointEntropySearch.pkl b/FilterEvaluator/checkpointEntropySearch.pkl
new file mode 100644
index 0000000..dbcdf12
Binary files /dev/null and b/FilterEvaluator/checkpointEntropySearch.pkl differ
diff --git a/FilterEvaluator/checkpointSearch.pkl b/FilterEvaluator/checkpointSearch.pkl
index d365846..f38ee2c 100644
Binary files a/FilterEvaluator/checkpointSearch.pkl and b/FilterEvaluator/checkpointSearch.pkl differ
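
In the comparison above, Net3Grad33 differs from Net333 only in calling SetConvRequiresGrad(0, False), so the searched first-layer filters stay fixed while the rest of the network trains. A hedged sketch of what that freeze amounts to (some torch versions reject frozen parameters handed to the optimizer, hence the filter):

    for p in model.features[0].parameters():
        p.requires_grad = False
    optimizer = optim.SGD((p for p in model.parameters() if p.requires_grad), lr=0.1)
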
diff --git a/tools/Loader.py b/tools/Loader.py
index c37cfe3..c32c3b5 100644
--- a/tools/Loader.py
+++ b/tools/Loader.py
@@ -34,9 +34,12 @@ def MNIST(batchsize=8, num_workers=0, shuffle=False, trainsize=0, resize=0):
             transforms.ToTensor(),
             transforms.Normalize((0.1307,), (0.3081,))
         ])), batch_size=batchsize, shuffle=shuffle, num_workers=num_workers, drop_last=True)
-    train_loader.batch_sampler.sampler.num_samples = 50000
-    test_loader.batch_sampler.sampler.num_samples = 10000
-    return train_loader, test_loader
+    print("Train Data size:"+str(trainsize)+" Shuffle:"+str(shuffle)+" BatchSize:"+str(batchsize))
+    try:
+        train_loader.batch_sampler.sampler.num_samples = trainsize
+        test_loader.batch_sampler.sampler.num_samples = 10000
+    finally:
+        return train_loader, test_loader
 
 def Cifar10(batchsize=8, num_workers=0, shuffle=False):
     CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
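
The try/finally presumably guards against torch versions where RandomSampler.num_samples is a read-only property, but when the assignment fails the requested trainsize is silently ignored. A sketch of an alternative (limited_loader is a hypothetical helper) that caps the sample count by subsampling the dataset itself with the standard torch.utils.data.Subset:

    from torch.utils.data import DataLoader, Subset

    def limited_loader(dataset, batchsize, trainsize=0, shuffle=False, num_workers=0):
        if trainsize and trainsize < len(dataset):
            dataset = Subset(dataset, range(trainsize))  # cap the sample count explicitly
        return DataLoader(dataset, batch_size=batchsize, shuffle=shuffle,
                          num_workers=num_workers, drop_last=True)
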