diff --git a/FilterEvaluator/Evaluator.py b/FilterEvaluator/Evaluator.py
index 825be44..a73a7a8 100644
--- a/FilterEvaluator/Evaluator.py
+++ b/FilterEvaluator/Evaluator.py
@@ -74,8 +74,9 @@
 traindata, testdata = Loader.MNIST(batchsize, shuffle=True, resize=7)
 
 
-bestweight = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,8,20,250000)
+bestweight,bestentropy = EvaluatorUnsuper.UnsuperLearnSearchBestWeight(model,layer,traindata,8,50,1000000)
 np.save(CurrentPath+"bestweightEntropySearch.npy", bestweight)
+np.save(CurrentPath+"bestweightEntropySearch_entropy="+str(bestentropy), bestweight)
 utils.NumpyToImage(bestweight, CurrentPath+"image",title="EntropySearchWerightBest")
 EvaluatorUnsuper.SetModelConvWeight(model,layer,bestweight)
 utils.SaveModel(model,CurrentPath+"/checkpointEntropySearch.pkl")
diff --git a/FilterEvaluator/EvaluatorUnsuper.py b/FilterEvaluator/EvaluatorUnsuper.py
index aa93f33..62669d5 100644
--- a/FilterEvaluator/EvaluatorUnsuper.py
+++ b/FilterEvaluator/EvaluatorUnsuper.py
@@ -190,6 +190,9 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
     newlayer = nn.Conv2d(tl.in_channels, stepsize, tl.kernel_size, tl.stride, tl.padding, tl.dilation, tl.groups, tl.bias, tl.padding_mode)
     newlayer = utils.SetDevice(newlayer)
+    newweightshape = list(newlayer.weight.data.shape)
+    newweightshape[0] = stepsize
+    newweightshaperandom = [newweightshape[-1]*newweightshape[-2], newweightshape[0]*newweightshape[1]]
 
     # pre load train data for speed up.
     datas = []
@@ -208,8 +211,9 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
         shift.append(1<
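
Notes on the patch:

The call-site change in Evaluator.py assumes UnsuperLearnSearchBestWeight now returns a (bestweight, bestentropy) tuple instead of the weight array alone, and it raises the search budget: the fifth argument is stepsize (confirmed by its use in the second hunk), going from 20 to 50, and the sixth, presumably an iteration or candidate budget, from 250000 to 1000000. Whether the search minimizes or maximizes entropy is not visible in this diff; the sketch below assumes minimization, and search_best_weight / entropy_of are hypothetical stand-ins for the real search loop:

    import numpy as np

    # Hypothetical sketch of the new contract: keep the best-scoring
    # filter bank and return its entropy alongside it, so the caller can
    # stamp the value into the saved file name. The scorer used below is
    # a stand-in, not the repo's entropy measure.
    def search_best_weight(candidates, entropy_of):
        bestweight, bestentropy = None, float("inf")
        for cand in candidates:
            e = entropy_of(cand)
            if e < bestentropy:          # assumes lower entropy is better
                bestweight, bestentropy = cand, e
        return bestweight, bestentropy

    # Usage mirroring the updated call site in Evaluator.py:
    cands = [np.random.randn(8, 1, 3, 3) for _ in range(10)]
    w, ent = search_best_weight(cands, lambda c: float(np.var(c)))
    np.save("bestweightEntropySearch_entropy=" + str(ent), w)

One caveat at the call site: np.save appends ".npy" when the path has no extension, so the second save writes "bestweightEntropySearch_entropy=<value>.npy", embedding the float entropy in the file name; both files hold the same bestweight array.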
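
The three lines added in EvaluatorUnsuper.py precompute shapes for the candidate generator. A Conv2d weight tensor is laid out as [out_channels, in_channels, kH, kW]; setting newweightshape[0] = stepsize sizes the bank to the search step, and newweightshaperandom = [kH*kW, stepsize*in_channels] holds the same element count in a transposed 2-D layout. The code that consumes these shapes is outside this hunk, so the reshape/transpose order below is an assumption:

    import numpy as np

    # Illustrative values only: a 3x3 conv with 1 input channel and
    # stepsize=8 candidate filters per search step.
    stepsize, in_channels, kh, kw = 8, 1, 3, 3

    newweightshape = [stepsize, in_channels, kh, kw]          # conv layout
    newweightshaperandom = [kh * kw, stepsize * in_channels]  # [9, 8]

    # One plausible use: draw flat random filters in the transposed
    # layout, then restore the Conv2d layout before loading them into
    # newlayer.weight.
    flat = np.random.uniform(-1.0, 1.0, newweightshaperandom)
    weights = flat.T.reshape(newweightshape)
    assert weights.shape == (stepsize, in_channels, kh, kw)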
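
The third hunk is truncated after "shift.append(1<", which reads as the start of a left-shift expression such as 1 << i; the rest of the hunk is missing from this excerpt. A bit-shift table like that is commonly used to pack binarized filter responses into integer codes whose histogram yields an entropy estimate. The reconstruction below is hypothetical; none of it appears in the patch:

    import torch

    stepsize = 8
    shift = []
    for i in range(stepsize):
        shift.append(1 << i)                     # [1, 2, 4, ..., 128]
    shift = torch.tensor(shift, dtype=torch.int64)

    # Binarize the responses of `stepsize` filters and pack them into one
    # integer code per location; the code histogram then gives the entropy
    # of the filter bank's joint sign pattern.
    responses = torch.randn(stepsize, 1000)      # placeholder activations
    bits = (responses > 0).long()                # 0/1 per filter
    codes = (bits * shift.unsqueeze(1)).sum(0)   # pack 8 bits -> 0..255
    hist = torch.bincount(codes, minlength=1 << stepsize).float()
    prob = hist / hist.sum()
    entropy = -(prob[prob > 0] * prob[prob > 0].log2()).sum()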