add minactive return in search

@@ -7,3 +7,4 @@ Dataset/
.vscode
/*/__pycache__
.mypy_cache
/FilterEvaluator/image*

@@ -51,7 +51,7 @@ layer = 0
# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalOneLine")
# traindata, testdata = Loader.RandomMnist(batchsize, style="VerticalZebra")
# traindata, testdata = Loader.Cifar10Mono(batchsize)
traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=True)
traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=False)

@@ -71,17 +71,31 @@ traindata, testdata = Loader.Cifar10Mono(batchsize, num_workers=0, shuffle=True)
for batch_idx, (data, target) in enumerate(traindata):
    utils.NumpyToImage(data.cpu().detach().numpy(), CurrentPath+"image", title="TrainData")
    break

# weight = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=32, Interation=10)
# np.save("WeightSearch.npy", weight)
weight = np.load(CurrentPath+"WeightSearch.npy")
weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=1,SaveChannel=8,SearchChannelRatio=1, Interation=128)
utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")

b =0

# weight,active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, NumSearch=100000, SearchChannelRatio=32, Interation=10)
# np.save("WeightSearch.npy", weight)
# weight = np.load(CurrentPath+"WeightSearch.npy")
# utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
# weight = np.load(CurrentPath+"WeightSearch.npy")
# bestweight,index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model,layer,weight,traindata,128,100000)
# np.save(CurrentPath+"bestweightSearch.npy", bestweight)
# bestweight = np.load(CurrentPath+"bestweightSearch.npy")
# utils.NumpyToImage(bestweight, CurrentPath+"image")
# utils.NumpyToImage(bestweight, CurrentPath+"image",title="SearchWerightBest")
# EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
# utils.SaveModel(model,CurrentPath+"/checkpointSearch.pkl")
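
A minimal caller-side sketch of how the new second return value could be used; the one-score-per-kept-filter pairing and the "SearchWeightRanked" title are assumptions, while np, utils.NumpyToImage and CurrentPath come from the surrounding script:

order = np.argsort(active)                      # ascending: weakest response first (assumed pairing with weight rows)
print("kept filters ranked by activation score:", active[order])
utils.NumpyToImage(weight[order], CurrentPath+"image", title="SearchWeightRanked")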

@@ -90,12 +104,12 @@ utils.NumpyToImage(weight, CurrentPath+"image",title="SearchWeight")
# weight = EvaluatorUnsuper.UnsuperLearnTrainWeight(model, layer, traindata, NumTrain=5000)
# np.save("WeightTrain.npy", weight)
# utils.NumpyToImage(bestweight, CurrentPath+"image",title="TrainWeight")
# weight = np.load(CurrentPath+"WeightTrain.npy")
# utils.NumpyToImage(weight, CurrentPath+"image",title="TrainWeight")
# bestweight, index = EvaluatorUnsuper.UnsuperLearnFindBestWeight(model, layer, weight, traindata, databatchs=64, interation=1000000)
# np.save(CurrentPath+"bestweightTrain.npy", bestweight)
# bestweight = np.load(CurrentPath+"bestweightTrain.npy")
# utils.NumpyToImage(bestweight, CurrentPath+"image")
# utils.NumpyToImage(bestweight, CurrentPath+"image",title="TrainWerightBest")
# EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
# utils.SaveModel(model,CurrentPath+"/checkpointTrain.pkl")

@@ -66,8 +66,6 @@ def UnsuperLearnSearchWeight(model, layer, dataloader, NumSearch=10000, SaveChan
for i in range(NumSearch):
    newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight[i]))

    output = model.ForwardLayer(dataset,layer-1)
    output = newlayer(output)
    output = torch.reshape(output.transpose(0,1),(newlayer.out_channels,-1))
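
What each search iteration does, as a hedged standalone sketch: load one candidate filter bank into a probe convolution, push the cached batch through the frozen model up to layer-1, apply the probe, and flatten so that every output channel yields one long response vector. The tensor shapes, the 8-channel probe and the random stand-ins below are invented; newlayer, newweight, model.ForwardLayer and dataset are the names used in the hunk.

import torch
import torch.nn as nn

features = torch.randn(16, 8, 32, 32)        # stands in for model.ForwardLayer(dataset, layer-1)
newlayer = nn.Conv2d(8, 8, kernel_size=3, bias=False)
candidate = torch.randn(8, 8, 3, 3)          # stands in for one entry newweight[i]

with torch.no_grad():
    newlayer.weight.data = candidate          # same assignment as in the hunk
    output = newlayer(features)               # (N, C, H, W) responses
    # one row per output channel, flattened over batch and spatial positions
    output = torch.reshape(output.transpose(0, 1), (newlayer.out_channels, -1))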

@@ -78,11 +76,8 @@ def UnsuperLearnSearchWeight(model, layer, dataloader, NumSearch=10000, SaveChan
dat2 = torch.mean(dat2 * dat2,dim=1)
score = dat2.cpu().detach().numpy()

# score = GetScore(model, layer, newlayer, dataset)
minactive = np.append(minactive, score)
minweight = np.concatenate((minweight, newweight[i]))

index = minactive.argsort()
minactive = minactive[index[0:SaveChannel]]
minweight = minweight[index[0:SaveChannel]]
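
These lines maintain a running selection by lowest activation: the candidate scores are appended to minactive, the candidate filters to minweight, and only the SaveChannel lowest-scoring entries survive. A self-contained sketch of that bookkeeping; keep_min_active is a hypothetical helper name and the single-channel 3x3 filter shapes are invented for illustration:

import numpy as np

def keep_min_active(minweight, minactive, cand_weight, cand_score, SaveChannel):
    # Append the new candidates, then keep the SaveChannel lowest-scoring ones.
    minactive = np.append(minactive, cand_score)
    minweight = np.concatenate((minweight, cand_weight))
    index = minactive.argsort()                            # ascending: weakest response first
    return minweight[index[:SaveChannel]], minactive[index[:SaveChannel]]

# Fold in a few fake search batches of single-channel 3x3 filters.
weights = np.empty((0, 1, 3, 3), dtype=np.float32)
scores = np.empty(0, dtype=np.float32)
for _ in range(4):
    cand = np.random.randn(8, 1, 3, 3).astype(np.float32)
    cand_score = (cand.reshape(8, -1) ** 2).mean(axis=1)   # mean-squared response, as in the hunk
    weights, scores = keep_min_active(weights, scores, cand, cand_score, SaveChannel=8)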

@@ -93,7 +88,7 @@ def UnsuperLearnSearchWeight(model, layer, dataloader, NumSearch=10000, SaveChan
tl.data=utils.SetDevice(torch.from_numpy(minweight[0:tl.out_channels]))
interationbar.close()
return minweight
return minweight, minactive

def TrainLayer(netmodel, layer, SearchLayer, DataSet, Epoch=100):
    netmodel.eval()
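
Call-site impact, per the commit title: UnsuperLearnSearchWeight now returns the kept activation scores alongside the weights, so any remaining single-value call sites (such as the commented examples earlier in this diff) would need to unpack both values, e.g. "weight, active = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, ...)" instead of "weight = EvaluatorUnsuper.UnsuperLearnSearchWeight(model, layer, traindata, ...)".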

[Binary image files also changed in this diff: 11 removed and 1 replaced; the diff viewer's width/height/size metadata is omitted.]