Speed up the search.

1. Model channel output set to 4 from 8.
2. Network weights are of type int, ranging from -4 to 4.
This commit is contained in:
c 2019-12-30 17:24:27 +08:00
parent 18126824ee
commit 346159068c
5 changed files with 22 additions and 14 deletions

View File

@ -74,8 +74,9 @@ traindata, testdata = Loader.MNIST(batchsize, shuffle=True, resize=7)
bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,8,20,250000)
bestweight,bestentropy = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,8,50,1000000)
np.save(CurrentPath+"bestweightEntropySearch.npy", bestweight)
np.save(CurrentPath+"bestweightEntropySearch_entropy="+str(bestentropy), bestweight)
utils.NumpyToImage(bestweight, CurrentPath+"image",title="EntropySearchWerightBest")
EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
utils.SaveModel(model,CurrentPath+"/checkpointEntropySearch.pkl")

View File

@ -190,6 +190,9 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
newlayer = nn.Conv2d(tl.in_channels, stepsize, tl.kernel_size,
tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
newlayer = utils.SetDevice(newlayer)
newweightshape = list(newlayer.weight.data.shape)
newweightshape[0] = stepsize
newweightshaperandom = [newweightshape[-1]*newweightshape[-2],newweightshape[0]*newweightshape[1]]
# pre load train data for speed up.
datas = []
@ -209,7 +212,8 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
# shift = torch.from_numpy(np.array(shift).astype("uint8"))
bestweight = []
bestweight = [np.zeros((8))]
bestentropy = [100.0]
bittedset = []
bittedLock = threading.Lock()
@ -237,18 +241,21 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
bestLock.acquire()
if entropys[argmin] < bestentropy[0]:
bestweight = newweight[indexs[argmin]]
bestweight[0] = newweight[indexs[argmin]]
bestentropy[0] = entropys[argmin]
print("finded better entropy")
bestLock.release()
for j in range(interation):
newweightshape = list(newlayer.weight.data.shape)
newweightshape[0] = stepsize
newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
# newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
# newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
# newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
# newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
newweight = np.random.randint(-4.0,5.0,newweightshaperandom).astype("int32")
newweight = (newweight-np.mean(newweight,0)).astype("int32")
newweight = np.swapaxes(newweight,0,1).reshape(newweightshape).astype("float32")
newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
outputs = newlayer(datasnetout).transpose(0,1)
@ -289,7 +296,7 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
time.sleep(100)
interationbar.close()
return bestweight
return bestweight[0],bestentropy[0]
def SetModelConvWeight(model, layer, weight):
w = utils.SetDevice(torch.from_numpy(weight))

View File

@ -64,8 +64,8 @@ class Net333(UniModule.ModuleBase):
def __init__(self):
super(Net333, self).__init__()
layers = []
layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(1, 4, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(4, 1, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
self.features = nn.Sequential(*layers)
def forward(self, x):
@ -77,8 +77,8 @@ class Net3Grad33(UniModule.ModuleBase):
def __init__(self):
super(Net3Grad33, self).__init__()
layers = []
layers += [nn.Conv2d(1, 8, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(8, 1, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(1, 4, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(4, 1, kernel_size=3,bias=False),nn.Sigmoid()]
layers += [nn.Conv2d(1, 10, kernel_size=3,bias=False)]
self.features = nn.Sequential(*layers)
self.SetConvRequiresGrad(0,False)