Add directed search of 8 weights in one step, instead of working in two steps.

c 2019-12-17 13:53:27 +08:00
parent fa7a066b87
commit 418e79df60
11 changed files with 129 additions and 34 deletions

View File

@@ -44,31 +44,42 @@ model.PrintLayer()
-# traindata, testdata = Loader.MNIST(batchsize)
+# traindata, testdata = Loader.MNIST(batchsize, shuffle=True,)
-traindata, testdata = Loader.MNIST(batchsize, resize=7)
+traindata, testdata = Loader.MNIST(batchsize, shuffle=True, resize=7)
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Vertical")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="Vertical")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="Horizontal")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="Horizontal")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="VerticalOneLine")
-# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
+# traindata, testdata = Loader.RandomMnist(batchsize, shuffle=True, style="VerticalZebra")
-# traindata, testdata = Loader.Cifar10Mono(batchsize)
+# traindata, testdata = Loader.Cifar10Mono(batchsize, shuffle=True)
-# traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=False)
+# traindata, testdata = Loader.Cifar10Mono(batchsize, shuffle=True, num_workers=0, shuffle=False)
-# weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=500000, SearchChannelRatio=32, Interation=5)
+#weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=500000, SearchChannelRatio=32, Interation=50)
-# np.save("WeightSearch.npy", weight)
+#np.save("WeightSearch.npy", weight)
-weight = np.load(CurrentPath+"WeightSearch.npy")
+# weight = np.load(CurrentPath+"WeightSearch.npy")
-utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
+# utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
-weight = np.load(CurrentPath+"WeightSearch.npy")
+# weight = np.load(CurrentPath+"WeightSearch.npy")
-# weight = weight[0:256]
+# # weight = weight[0:256]
-bestweight,index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,32,4000000)
+# bestweight,index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,32,20000000)
-np.save(CurrentPath+"bestweightSearch.npy", bestweight)
+# np.save(CurrentPath+"bestweightSearch.npy", bestweight)
-bestweight = np.load(CurrentPath+"bestweightSearch.npy")
+# bestweight = np.load(CurrentPath+"bestweightSearch.npy")
-utils.NumpyToImage(bestweight, CurrentPath+"image",title="SearchWerightBest")
+# utils.NumpyToImage(bestweight, CurrentPath+"image",title="SearchWerightBest")
+# EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
+# utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
+bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,32,20,250000)
+np.save(CurrentPath+"bestweightEntropySearch.npy", bestweight)
+utils.NumpyToImage(bestweight, CurrentPath+"image",title="EntropySearchWerightBest")
 EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
-utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
+utils.SaveModel(model,CurrentPath+"/checkpointEntropySearch.pkl")
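
In short, this file replaces the old two-step pipeline (UnsuperLearnSearchWeight to generate candidate kernels, then UnsuperLearnFindBestWeight to select among them) with a single directed search. A minimal sketch of the new flow, with the calls copied from the diff above (the positional arguments 32, 20, 250000 correspond to databatchs, stepsize and interation in the new function's signature; that mapping is read off the code, not documented anywhere):

# one call: sample random kernels and keep the 8-channel combination
# whose binarized outputs have the highest entropy
bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model, layer, traindata, 32, 20, 250000)
np.save(CurrentPath+"bestweightEntropySearch.npy", bestweight)
EvaluatorUnsuper.SetModelConvWeight(model, layer, bestweight)
utils.SaveModel(model, CurrentPath+"/checkpointEntropySearch.pkl")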

View File

@@ -15,6 +15,7 @@ from PIL import Image
 import random
 import cv2
 from tqdm import tqdm
+import threading
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
@@ -134,7 +135,8 @@ def UnsuperLearnTrainWeight(model, layer, dataloader, NumTrain=500, TrainChannel
         print("search :" + str(i))
     return minweight

-def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128,interation=10000):
+# search best weight from weight input
+def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128, interation=10000):
     interationbar = tqdm(total=interation)
     weight = weight.astype("float32")
     netmodel.eval()
@@ -177,6 +179,76 @@ def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=128, interation=10000):
     interationbar.close()
     return bestweight,indexs[sortindex]

+# search best weight from random data
+def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=1000, interation=1000):
+    interationbar = tqdm(total=interation)
+    forwardlayer = layer - 1
+    bestweight = []
+    bestentropy = 100.0
+    netmodel.eval()
+    tl = netmodel.features[layer]
+    outchannels = tl.out_channels
+    newlayer = nn.Conv2d(tl.in_channels, stepsize, tl.kernel_size,
+                         tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
+    newlayer = utils.SetDevice(newlayer)
+    # preload train data to speed things up
+    datas = []
+    for batch_idx, (data, target) in enumerate(dataloader):
+        datas.append(utils.SetDevice(data))
+        if batch_idx >= databatchs-1:
+            break
+    # precompute the searched layer's input to speed things up
+    datasnetout = []
+    for d in datas:
+        datasnetout.append(netmodel.ForwardLayer(d, forwardlayer))
+    datasnetout = torch.cat(datasnetout)
+    # constant bit shifts (1, 2, 4, ..., 128) used to pack the
+    # per-channel binarizations into one byte for histc
+    shift = []
+    for i in range(outchannels):
+        shift.append(1 << i)
+    shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
+    for j in range(interation):
+        # draw `stepsize` random zero-mean kernels
+        newweightshape = list(newlayer.weight.data.shape)
+        newweightshape[0] = stepsize
+        newweight = np.random.uniform(-1.0, 1.0, newweightshape).astype("float32")
+        newweight = newweight.reshape((-1, newweightshape[-1]*newweightshape[-2]))
+        newweight = np.swapaxes(newweight, 0, 1) - np.mean(newweight, -1)
+        newweight = np.swapaxes(newweight, 0, 1).reshape(newweightshape)
+        newlayer.weight.data = utils.SetDevice(torch.from_numpy(newweight))
+        outputs = newlayer(datasnetout).transpose(0, 1)
+        # sample random `outchannels`-sized combinations of the candidates
+        samplesize = stepsize*stepsize
+        indexs = np.random.randint(0, stepsize, samplesize*outchannels).reshape(samplesize, -1)
+        # shape e.g. 1000 8 4096 5 5
+        outputs = outputs[indexs]
+        # shape e.g. 102400 1000 8
+        reshaped = outputs.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
+        # per-combination channel means, shape e.g. 1000 8
+        meaned = reshaped.mean(0)
+        # binarize against the channel mean and pack the bits into one
+        # byte per pixel, shape e.g. 102400 1000
+        bitted = ((reshaped > meaned) * shift).sum(2).type(torch.float32)
+        entropys = []
+        for i in range(len(indexs)):
+            histced = bitted[:, i].histc(256, 0, 255).type(torch.float32)
+            histced = histced[histced > 0]
+            histced = histced/histced.sum()
+            # signed sum of p*log2(p), i.e. the negative Shannon entropy,
+            # so the argmin below keeps the maximum-entropy combination
+            entropy = (histced.log2()*histced).sum()
+            entropys.append(entropy.detach().cpu().numpy())
+        argmin = np.argmin(entropys)
+        if entropys[argmin] < bestentropy:
+            bestweight = newweight[indexs[argmin]]
+            bestentropy = entropys[argmin]
+        interationbar.update(1)
+    interationbar.close()
+    return bestweight
+
 def SetModelConvWeight(model, layer, weight):
     w = utils.SetDevice(torch.from_numpy(weight))
     model.features[layer].weight.data = w
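
The scoring step inside UnsuperLearnSearchBestWeight is easy to lose in the diff, so here is a self-contained sketch of it (the function name and shapes below are illustrative assumptions, not the repo's API): each pixel of an 8-channel response map is binarized against its channel mean, the 8 bits are packed into one byte, and the entropy of the resulting 256-bin histogram scores the kernel combination. Note that the committed code keeps the signed sum of p*log2(p), i.e. the negative entropy, and takes np.argmin, which selects the maximum-entropy candidate; this sketch returns the positive entropy instead.

import torch

def binary_pattern_entropy(acts):
    # acts: [channels, pixels] responses of one candidate kernel set
    channels, pixels = acts.shape
    shift = torch.tensor([1 << i for i in range(channels)], device=acts.device)
    # binarize each pixel against its channel mean
    bits = (acts > acts.mean(dim=1, keepdim=True)).long()   # [C, P], 0/1
    # pack the C bits of every pixel into one integer code (0..2^C-1)
    codes = (bits * shift.unsqueeze(1)).sum(dim=0).float()  # [P]
    hist = codes.histc(bins=256, min=0, max=255)
    p = hist[hist > 0] / hist.sum()
    return -(p * p.log2()).sum()  # Shannon entropy in bits, at most 8

acts = torch.randn(8, 4096)  # e.g. 8 channels over 4096 spatial positions
print(binary_pattern_entropy(acts))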

View File

@@ -65,8 +65,8 @@ class Net333(UniModule.ModuleBase):
         super(Net333, self).__init__()
         layers = []
         layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 10, kernel_size=3,bias=False)]
+        layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
+        layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
         self.features = nn.Sequential(*layers)
     def forward(self, x):
         x = self.features(x)
@@ -78,8 +78,8 @@ class Net3Grad33(UniModule.ModuleBase):
         super(Net3Grad33, self).__init__()
         layers = []
         layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 8, kernel_size=3,bias=False),nn.Sigmoid()]
-        layers += [nn.Conv2d(8, 10, kernel_size=3,bias=False)]
+        layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
+        layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
         self.features = nn.Sequential(*layers)
         self.SetConvRequiresGrad(0,False)
     def forward(self, x):
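
Net3Grad33 is identical to Net333 except that it freezes its first conv layer, which is what lets the pre-searched kernels survive supervised training. SetConvRequiresGrad is not shown in this diff; assuming it simply toggles requires_grad on the indexed layer, a minimal sketch would be:

import torch.nn as nn

def set_conv_requires_grad(features: nn.Sequential, index: int, flag: bool):
    # freeze (flag=False) or unfreeze one layer, e.g. the entropy-searched
    # kernels in features[0], while the rest of the network trains normally
    for p in features[index].parameters():
        p.requires_grad = flag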

View File

@@ -25,7 +25,7 @@ sys.path.append(CurrentPath+'../')
 import Model
 from tools import utils, Train, Loader, WebVisual
+import EvaluatorUnsuper

 batchsize = 128
@@ -40,7 +40,7 @@ batchsize = 128
 # traindata, testdata = Loader.MNIST(batchsize, num_workers=4, trainsize=5000)
-traindata, testdata = Loader.MNIST(batchsize, resize=7, trainsize=5000)
+traindata, testdata = Loader.MNIST(batchsize, resize=7, trainsize=200)
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="Vertical")
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="Horizontal")
 # traindata, testdata = Loader.RandomMnist(batchsize, num_workers=4, style="VerticalOneLine")
@@ -84,15 +84,24 @@ linePretrainTrain = WebVisual.Line(window, "PretrainTrain")
-model = utils.SetDevice(Model.Net333())
-optimizer = optim.SGD(model.parameters(), lr=0.1)
-Train.TrainEpochs(model,traindata,optimizer,testdata,1000,15,lineNoPre)
+model = utils.SetDevice(Model.Net333())
+# bestweight = np.ones((model.features[0].weight.data.shape),dtype="float32")
+# EvaluatorUnsuper.SetModelConvWeight(model,0,bestweight)
+optimizer = optim.SGD(model.parameters(), lr=0.1)
+Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,lineNoPre)
+
+# model = utils.SetDevice(Model.Net3Grad33())
+# model = utils.LoadModel(model, CurrentPath+"/checkpointSearch.pkl")
+# optimizer = optim.SGD(model.parameters(), lr=0.1)
+# Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,linePretrainSearch)

 model = utils.SetDevice(Model.Net3Grad33())
-model = utils.LoadModel(model, CurrentPath+"/checkpointSearch.pkl")
+model = utils.LoadModel(model, CurrentPath+"/checkpointEntropySearch.pkl")
 optimizer = optim.SGD(model.parameters(), lr=0.1)
-Train.TrainEpochs(model,traindata,optimizer,testdata,1000,15,linePretrainSearch)
+Train.TrainEpochs(model,traindata,optimizer,testdata,3000,10,linePretrainSearch)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -34,8 +34,11 @@ def MNIST(batchsize=8, num_workers=0, shuffle=False, trainsize=0, resize=0):
             transforms.ToTensor(),
             transforms.Normalize((0.1307,), (0.3081,))
         ])), batch_size=batchsize, shuffle=shuffle, num_workers=num_workers, drop_last=True)
-    train_loader.batch_sampler.sampler.num_samples = 50000
-    test_loader.batch_sampler.sampler.num_samples = 10000
-    return train_loader, test_loader
+    print("Train Data size:"+str(trainsize)+" Shuffle:"+str(shuffle)+" BatchSize:"+str(batchsize))
+    try:
+        train_loader.batch_sampler.sampler.num_samples = trainsize
+        test_loader.batch_sampler.sampler.num_samples = 10000
+    finally:
+        return train_loader, test_loader

 def Cifar10(batchsize=8, num_workers=0, shuffle=False):
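
The try/finally most likely guards the num_samples override: RandomSampler (the sampler used when shuffle=True) exposes num_samples, but other samplers may reject the assignment, and the finally clause returns the loaders either way (at the cost of swallowing the exception). A sampler-agnostic alternative for capping the train size would be Subset; an illustrative sketch, not what the commit does:

from torch.utils.data import DataLoader, Subset

def capped_loader(dataset, trainsize, batchsize, shuffle=True):
    # take the first `trainsize` examples regardless of sampler type
    if trainsize and trainsize < len(dataset):
        dataset = Subset(dataset, range(trainsize))
    return DataLoader(dataset, batch_size=batchsize, shuffle=shuffle, drop_last=True)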