diff --git a/FilterEvaluator/Evaluator.py b/FilterEvaluator/Evaluator.py
index 937b9bd..bd13e97 100644
--- a/FilterEvaluator/Evaluator.py
+++ b/FilterEvaluator/Evaluator.py
@@ -16,7 +16,6 @@ import random
 import cv2
 
-
 
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
 print("Current Path :" + CurrentPath)
 
diff --git a/FilterEvaluator/EvaluatorUnsuper.py b/FilterEvaluator/EvaluatorUnsuper.py
index 608a878..4cd111d 100644
--- a/FilterEvaluator/EvaluatorUnsuper.py
+++ b/FilterEvaluator/EvaluatorUnsuper.py
@@ -16,6 +16,7 @@ import random
 import cv2
 from tqdm import tqdm
 import threading
+from itertools import combinations
 
 
 CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
@@ -26,6 +27,19 @@ import Model as Model
 from tools import utils, Train, Loader
+
+
+
+
+# iterable = combinations(range(72), 4)
+# count = iterable.count()
+# pool = tuple(iterable)
+# n = len(pool)
+# indices = sorted(random.sample(range(n), 2))
+# fdafda = tuple(pool[i] for i in indices)
+
+
+
 
 
 def GetScore(netmodel,layer,SearchLayer,DataSet):
     netmodel.eval()
     sample = utils.SetDevice(torch.empty((SearchLayer.out_channels,0)))
@@ -184,10 +198,12 @@ def UnsuperLearnFindBestWeight(netmodel, layer, weight, dataloader, databatchs=1
 
 
 # search best weight from random data
-def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=1000, interation=1000):
+# Test total sample size = stepsize * stepsize * interation
+# Random kernel number = in_channels * kernel_size[0] * kernel_size[1] * 16
+def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, stepsize=20, interation=1000):
     interationbar = tqdm(total=interation)
     forwardlayer = layer -1
-    
+    samplesize = stepsize*stepsize
     netmodel.eval()
     tl = netmodel.features[layer]
     outchannels = tl.out_channels
@@ -211,93 +227,55 @@ def UnsuperLearnSearchBestWeight(netmodel, layer, dataloader, databatchs=128, st
     for i in range(outchannels):
         shift.append(1<<i)
     shift = utils.SetDevice(torch.from_numpy(np.array(shift).astype("uint8")))
 
 
+    newweightshape = list(newlayer.weight.data.shape)
+    newweightshape[0] = newweightshape[1]*newweightshape[2]*newweightshape[3]*16
+    newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
+    newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
+    newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
+    newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
+
+    newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
+    outputs = newlayer(datasnetout).transpose(0,1)
+
+    indexs = np.random.randint(0,newweightshape[0],interation*samplesize*outchannels).reshape(interation,samplesize,-1)
+
+    bestentropy = 100
+
 
 
-    bittedset = []
-    bittedLock = threading.Lock()
-    bestLock = threading.Lock()
-    bestentropy = [100]
-    class CPU (threading.Thread):
-        def run(self):
-            hasdata = 0
-            bittedLock.acquire()
-            while len(bittedset)>0:
-                bitted = bittedset.pop()
-                hasdata = 1
-            bittedLock.release()
-            if hasdata > 0:
-                entropys = []
-                for i in range(len(indexs)):
-                    histced = bitted[:,i].histc(256,0,255).type(torch.float32)
-                    histced = histced[histced>0]
-                    histced = histced/histced.sum()
-                    entropy = (histced.log2()*histced).sum()
-                    entropys.append(entropy.numpy())
-                argmin = np.argmin(entropys)
-
-                bestLock.acquire()
-                if entropys[argmin] < bestentropy[0]:
-                    bestweight = newweight[indexs[argmin]]
-                    bestentropy[0] = entropys[argmin]
-                    print("finded better entropy")
-                bestLock.release()
 
 
 
     for j in range(interation):
-        newweightshape = list(newlayer.weight.data.shape)
-        newweightshape[0] = stepsize
-        newweight = np.random.uniform(-1.0,1.0,newweightshape).astype("float32")
-        newweight = newweight.reshape((-1,newweightshape[-1]*newweightshape[-2]))
-        newweight = np.swapaxes(newweight,0,1)-np.mean(newweight,-1)
-        newweight = np.swapaxes(newweight,0,1).reshape(newweightshape)
-
-        newlayer.weight.data=utils.SetDevice(torch.from_numpy(newweight))
-        outputs = newlayer(datasnetout).transpose(0,1)
-        samplesize = stepsize*stepsize
-        indexs = np.random.randint(0,stepsize,samplesize*outchannels).reshape(samplesize,-1)
-        # 1000 8 4096 5 5
-        outputs = outputs[indexs]
-        # 102400 1000 8
-        reshaped = outputs.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
-        # 1000 8
+        # 400 8 4096 5 5
+        output = outputs[indexs[j]]
+        # 102400 400 8
+        reshaped = output.reshape(samplesize, outchannels, -1).permute(2, 0, 1)
+        # 400 8
         meaned = reshaped.mean(0)
-        # 102400 1000
-        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
+        # 102400 400
+        # bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32).detach().cpu()
+        bitted = ((reshaped > meaned)* shift).sum(2).type(torch.float32)
 
-        bittedLock.acquire()
-        bittedset.append(bitted)
-        bittedLock.release()
-        threadcpu = CPU()
-        threadcpu.start()
-
-        # entropys = []
-        # for i in range(len(indexs)):
-        #     histced = bitted[:,i].histc(256,0,255).type(torch.float32)
-        #     histced = histced[histced>0]
-        #     histced = histced/histced.sum()
-        #     entropy = (histced.log2()*histced).sum()
-        #     entropys.append(entropy.detach().cpu().numpy())
-
-        # argmin = np.argmin(entropys)
-        # if entropys[argmin] < bestentropy:
-        #     bestweight = newweight[indexs[argmin]]
-        #     bestentropy = entropys[argmin]
-
+        entropys = []
+        for i in range(samplesize):
+            histced = bitted[:,i].histc(256,0,255).type(torch.float32)
+            histced = histced[histced>0]
+            histced = histced/histced.sum()
+            entropy = (histced.log2()*histced).sum()
+            entropys.append(entropy.detach().cpu().numpy())
+        argmin = np.argmin(entropys)
+        if entropys[argmin] < bestentropy:
+            bestweight = newweight[indexs[j][argmin]]
+            bestentropy = entropys[argmin]
         interationbar.update(1)
-        interationbar.set_description("left:"+str(len(bittedset)))
-
-    while bittedset:
-        time.sleep(100)
-
+        interationbar.set_description("entropy:"+str(bestentropy))
     interationbar.close()
     return bestweight
-
-
 
 
 def SetModelConvWeight(model, layer, weight):
     w = utils.SetDevice(torch.from_numpy(weight))
     model.features[layer].weight.data = w
diff --git a/FilterEvaluator/bestweightEntropySearch.npy b/FilterEvaluator/bestweightEntropySearch.npy
index 5f64aa9..c9e7d78 100644
Binary files a/FilterEvaluator/bestweightEntropySearch.npy and b/FilterEvaluator/bestweightEntropySearch.npy differ
diff --git a/FilterEvaluator/checkpointEntropySearch.pkl b/FilterEvaluator/checkpointEntropySearch.pkl
index dbcdf12..a104d5b 100644
Binary files a/FilterEvaluator/checkpointEntropySearch.pkl and b/FilterEvaluator/checkpointEntropySearch.pkl differ
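
Note on the commented-out combinations scratch code added near the imports: itertools.combinations returns a lazy iterator with no .count() method, and random.sample needs a sized pool, so the tuple(...) materialization (or math.comb for the size alone) really is required. A corrected standalone sketch of what that block appears to be driving at; the range(72)/choose-4 numbers come from the comment itself, everything else is illustrative:

import random
from itertools import combinations
from math import comb

# materialize every 4-element subset of range(72); C(72, 4) = 1,028,790 entries
pool = tuple(combinations(range(72), 4))
assert len(pool) == comb(72, 4)

# draw two distinct subsets by sampling indices into the pool
indices = sorted(random.sample(range(len(pool)), 2))
picked = tuple(pool[i] for i in indices)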
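
On the random-kernel generation that the patch hoists out of the iteration loop: it draws uniform weights and subtracts each 2-D kernel slice's mean, so every candidate filter is zero-mean. A minimal NumPy sketch of the same transform, written without the swapaxes round-trip; the shape below is illustrative, not taken from the patch:

import numpy as np

newweightshape = [64, 8, 5, 5]  # (kernels, in_channels, kh, kw), illustrative
w = np.random.uniform(-1.0, 1.0, newweightshape).astype("float32")

# flatten each 2-D slice to a row, remove its mean, restore the shape;
# equivalent to the patch's reshape / swapaxes / np.mean dance
flat = w.reshape(-1, newweightshape[-1] * newweightshape[-2])
flat -= flat.mean(-1, keepdims=True)
w = flat.reshape(newweightshape)

assert abs(float(w[0, 0].mean())) < 1e-5  # each slice is (near) zero-mean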
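
And a self-contained sketch of the scoring inside the new loop body: each channel's response is binarized against its per-candidate mean, the outchannels bits are packed into a byte code via the shift vector, and each candidate filter set is scored from the histogram of its codes. Note the accumulated quantity is sum(p * log2(p)), the negative of the Shannon entropy, so taking the argmin picks the candidate whose response codes are most entropic. Names mirror the patch; the tensor sizes and torch.randn input are illustrative:

import numpy as np
import torch

def response_scores(reshaped):
    # reshaped: (pixels, candidates, channels) conv responses, channels <= 8
    channels = reshaped.shape[2]
    # one bit per channel: 1, 2, 4, ... packed into a byte code per pixel
    shift = torch.tensor([1 << i for i in range(channels)], dtype=torch.float32)
    meaned = reshaped.mean(0)                      # (candidates, channels)
    bitted = ((reshaped > meaned) * shift).sum(2)  # (pixels, candidates), codes 0..255
    scores = []
    for i in range(bitted.shape[1]):
        histced = bitted[:, i].histc(256, 0, 255)
        histced = histced[histced > 0]
        histced = histced / histced.sum()
        scores.append((histced.log2() * histced).sum().item())  # = -entropy
    return scores

# toy usage: 1024 pixels, 16 candidate sets, 8 channels
scores = response_scores(torch.randn(1024, 16, 8))
best = int(np.argmin(scores))  # lowest score = highest code entropy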