add ResNet weight dump

This commit is contained in:
colin 2020-07-18 11:23:58 +08:00
parent 7ab55557cc
commit ef0dd5bc00
7 changed files with 629 additions and 0 deletions

BIN
CNNDemo/ResNet50Weight.bin Normal file

Binary file not shown.

143
CNNDemo/ResNet50Weight.cc Normal file

@@ -0,0 +1,143 @@
int RN50_conv1_weight[]={0,9407,}
int RN50_bn1_running_mean[]={9408,9471,}
int RN50_bn1_running_var[]={9472,9535,}
int RN50_layer1__modules_0_conv1_weight[]={9536,13631,}
int RN50_layer1__modules_0_bn1_running_mean[]={13632,13695,}
int RN50_layer1__modules_0_bn1_running_var[]={13696,13759,}
int RN50_layer1__modules_0_conv2_weight[]={13760,50623,}
int RN50_layer1__modules_0_bn2_running_mean[]={50624,50687,}
int RN50_layer1__modules_0_bn2_running_var[]={50688,50751,}
int RN50_layer1__modules_0_conv3_weight[]={50752,67135,}
int RN50_layer1__modules_0_bn3_running_mean[]={67136,67391,}
int RN50_layer1__modules_0_bn3_running_var[]={67392,67647,}
int RN50_layer1__modules_0_downsample__modules_0_weight[]={67648,84031,}
int RN50_layer1__modules_0_downsample__modules_1_running_mean[]={84032,84287,}
int RN50_layer1__modules_0_downsample__modules_1_running_var[]={84288,84543,}
int RN50_layer2__modules_0_conv1_weight[]={84544,117311,}
int RN50_layer2__modules_0_bn1_running_mean[]={117312,117439,}
int RN50_layer2__modules_0_bn1_running_var[]={117440,117567,}
int RN50_layer2__modules_0_conv2_weight[]={117568,265023,}
int RN50_layer2__modules_0_bn2_running_mean[]={265024,265151,}
int RN50_layer2__modules_0_bn2_running_var[]={265152,265279,}
int RN50_layer2__modules_0_conv3_weight[]={265280,330815,}
int RN50_layer2__modules_0_bn3_running_mean[]={330816,331327,}
int RN50_layer2__modules_0_bn3_running_var[]={331328,331839,}
int RN50_layer2__modules_0_downsample__modules_0_weight[]={331840,462911,}
int RN50_layer2__modules_0_downsample__modules_1_running_mean[]={462912,463423,}
int RN50_layer2__modules_0_downsample__modules_1_running_var[]={463424,463935,}
int RN50_layer2__modules_1_conv1_weight[]={463936,529471,}
int RN50_layer2__modules_1_bn1_running_mean[]={529472,529599,}
int RN50_layer2__modules_1_bn1_running_var[]={529600,529727,}
int RN50_layer2__modules_1_conv2_weight[]={529728,677183,}
int RN50_layer2__modules_1_bn2_running_mean[]={677184,677311,}
int RN50_layer2__modules_1_bn2_running_var[]={677312,677439,}
int RN50_layer2__modules_1_conv3_weight[]={677440,742975,}
int RN50_layer2__modules_1_bn3_running_mean[]={742976,743487,}
int RN50_layer2__modules_1_bn3_running_var[]={743488,743999,}
int RN50_layer2__modules_2_conv1_weight[]={744000,809535,}
int RN50_layer2__modules_2_bn1_running_mean[]={809536,809663,}
int RN50_layer2__modules_2_bn1_running_var[]={809664,809791,}
int RN50_layer2__modules_2_conv2_weight[]={809792,957247,}
int RN50_layer2__modules_2_bn2_running_mean[]={957248,957375,}
int RN50_layer2__modules_2_bn2_running_var[]={957376,957503,}
int RN50_layer2__modules_2_conv3_weight[]={957504,1023039,}
int RN50_layer2__modules_2_bn3_running_mean[]={1023040,1023551,}
int RN50_layer2__modules_2_bn3_running_var[]={1023552,1024063,}
int RN50_layer2__modules_3_conv1_weight[]={1024064,1089599,}
int RN50_layer2__modules_3_bn1_running_mean[]={1089600,1089727,}
int RN50_layer2__modules_3_bn1_running_var[]={1089728,1089855,}
int RN50_layer2__modules_3_conv2_weight[]={1089856,1237311,}
int RN50_layer2__modules_3_bn2_running_mean[]={1237312,1237439,}
int RN50_layer2__modules_3_bn2_running_var[]={1237440,1237567,}
int RN50_layer2__modules_3_conv3_weight[]={1237568,1303103,}
int RN50_layer2__modules_3_bn3_running_mean[]={1303104,1303615,}
int RN50_layer2__modules_3_bn3_running_var[]={1303616,1304127,}
int RN50_layer3__modules_0_conv1_weight[]={1304128,1435199,}
int RN50_layer3__modules_0_bn1_running_mean[]={1435200,1435455,}
int RN50_layer3__modules_0_bn1_running_var[]={1435456,1435711,}
int RN50_layer3__modules_0_conv2_weight[]={1435712,2025535,}
int RN50_layer3__modules_0_bn2_running_mean[]={2025536,2025791,}
int RN50_layer3__modules_0_bn2_running_var[]={2025792,2026047,}
int RN50_layer3__modules_0_conv3_weight[]={2026048,2288191,}
int RN50_layer3__modules_0_bn3_running_mean[]={2288192,2289215,}
int RN50_layer3__modules_0_bn3_running_var[]={2289216,2290239,}
int RN50_layer3__modules_0_downsample__modules_0_weight[]={2290240,2814527,}
int RN50_layer3__modules_0_downsample__modules_1_running_mean[]={2814528,2815551,}
int RN50_layer3__modules_0_downsample__modules_1_running_var[]={2815552,2816575,}
int RN50_layer3__modules_1_conv1_weight[]={2816576,3078719,}
int RN50_layer3__modules_1_bn1_running_mean[]={3078720,3078975,}
int RN50_layer3__modules_1_bn1_running_var[]={3078976,3079231,}
int RN50_layer3__modules_1_conv2_weight[]={3079232,3669055,}
int RN50_layer3__modules_1_bn2_running_mean[]={3669056,3669311,}
int RN50_layer3__modules_1_bn2_running_var[]={3669312,3669567,}
int RN50_layer3__modules_1_conv3_weight[]={3669568,3931711,}
int RN50_layer3__modules_1_bn3_running_mean[]={3931712,3932735,}
int RN50_layer3__modules_1_bn3_running_var[]={3932736,3933759,}
int RN50_layer3__modules_2_conv1_weight[]={3933760,4195903,}
int RN50_layer3__modules_2_bn1_running_mean[]={4195904,4196159,}
int RN50_layer3__modules_2_bn1_running_var[]={4196160,4196415,}
int RN50_layer3__modules_2_conv2_weight[]={4196416,4786239,}
int RN50_layer3__modules_2_bn2_running_mean[]={4786240,4786495,}
int RN50_layer3__modules_2_bn2_running_var[]={4786496,4786751,}
int RN50_layer3__modules_2_conv3_weight[]={4786752,5048895,}
int RN50_layer3__modules_2_bn3_running_mean[]={5048896,5049919,}
int RN50_layer3__modules_2_bn3_running_var[]={5049920,5050943,}
int RN50_layer3__modules_3_conv1_weight[]={5050944,5313087,}
int RN50_layer3__modules_3_bn1_running_mean[]={5313088,5313343,}
int RN50_layer3__modules_3_bn1_running_var[]={5313344,5313599,}
int RN50_layer3__modules_3_conv2_weight[]={5313600,5903423,}
int RN50_layer3__modules_3_bn2_running_mean[]={5903424,5903679,}
int RN50_layer3__modules_3_bn2_running_var[]={5903680,5903935,}
int RN50_layer3__modules_3_conv3_weight[]={5903936,6166079,}
int RN50_layer3__modules_3_bn3_running_mean[]={6166080,6167103,}
int RN50_layer3__modules_3_bn3_running_var[]={6167104,6168127,}
int RN50_layer3__modules_4_conv1_weight[]={6168128,6430271,}
int RN50_layer3__modules_4_bn1_running_mean[]={6430272,6430527,}
int RN50_layer3__modules_4_bn1_running_var[]={6430528,6430783,}
int RN50_layer3__modules_4_conv2_weight[]={6430784,7020607,}
int RN50_layer3__modules_4_bn2_running_mean[]={7020608,7020863,}
int RN50_layer3__modules_4_bn2_running_var[]={7020864,7021119,}
int RN50_layer3__modules_4_conv3_weight[]={7021120,7283263,}
int RN50_layer3__modules_4_bn3_running_mean[]={7283264,7284287,}
int RN50_layer3__modules_4_bn3_running_var[]={7284288,7285311,}
int RN50_layer3__modules_5_conv1_weight[]={7285312,7547455,}
int RN50_layer3__modules_5_bn1_running_mean[]={7547456,7547711,}
int RN50_layer3__modules_5_bn1_running_var[]={7547712,7547967,}
int RN50_layer3__modules_5_conv2_weight[]={7547968,8137791,}
int RN50_layer3__modules_5_bn2_running_mean[]={8137792,8138047,}
int RN50_layer3__modules_5_bn2_running_var[]={8138048,8138303,}
int RN50_layer3__modules_5_conv3_weight[]={8138304,8400447,}
int RN50_layer3__modules_5_bn3_running_mean[]={8400448,8401471,}
int RN50_layer3__modules_5_bn3_running_var[]={8401472,8402495,}
int RN50_layer4__modules_0_conv1_weight[]={8402496,8926783,}
int RN50_layer4__modules_0_bn1_running_mean[]={8926784,8927295,}
int RN50_layer4__modules_0_bn1_running_var[]={8927296,8927807,}
int RN50_layer4__modules_0_conv2_weight[]={8927808,11287103,}
int RN50_layer4__modules_0_bn2_running_mean[]={11287104,11287615,}
int RN50_layer4__modules_0_bn2_running_var[]={11287616,11288127,}
int RN50_layer4__modules_0_conv3_weight[]={11288128,12336703,}
int RN50_layer4__modules_0_bn3_running_mean[]={12336704,12338751,}
int RN50_layer4__modules_0_bn3_running_var[]={12338752,12340799,}
int RN50_layer4__modules_0_downsample__modules_0_weight[]={12340800,14437951,}
int RN50_layer4__modules_0_downsample__modules_1_running_mean[]={14437952,14439999,}
int RN50_layer4__modules_0_downsample__modules_1_running_var[]={14440000,14442047,}
int RN50_layer4__modules_1_conv1_weight[]={14442048,15490623,}
int RN50_layer4__modules_1_bn1_running_mean[]={15490624,15491135,}
int RN50_layer4__modules_1_bn1_running_var[]={15491136,15491647,}
int RN50_layer4__modules_1_conv2_weight[]={15491648,17850943,}
int RN50_layer4__modules_1_bn2_running_mean[]={17850944,17851455,}
int RN50_layer4__modules_1_bn2_running_var[]={17851456,17851967,}
int RN50_layer4__modules_1_conv3_weight[]={17851968,18900543,}
int RN50_layer4__modules_1_bn3_running_mean[]={18900544,18902591,}
int RN50_layer4__modules_1_bn3_running_var[]={18902592,18904639,}
int RN50_layer4__modules_2_conv1_weight[]={18904640,19953215,}
int RN50_layer4__modules_2_bn1_running_mean[]={19953216,19953727,}
int RN50_layer4__modules_2_bn1_running_var[]={19953728,19954239,}
int RN50_layer4__modules_2_conv2_weight[]={19954240,22313535,}
int RN50_layer4__modules_2_bn2_running_mean[]={22313536,22314047,}
int RN50_layer4__modules_2_bn2_running_var[]={22314048,22314559,}
int RN50_layer4__modules_2_conv3_weight[]={22314560,23363135,}
int RN50_layer4__modules_2_bn3_running_mean[]={23363136,23365183,}
int RN50_layer4__modules_2_bn3_running_var[]={23365184,23367231,}
int RN50_fc_weight[]={23367232,25415231,}
int RN50_fc_bias[]={25415232,25416231,}
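Each array above stores two values: the index of the first and of the last float32 that the named tensor occupies in ResNet50Weight.bin, so the final index (25416231) implies a dump of 25,416,232 floats, roughly 97 MiB. A minimal reading sketch, not part of this commit; the helper name and the use of numpy are illustrative assumptions:

import numpy as np

def load_range(path, start, end):
    # The dump is a flat sequence of native-endian float32 values, so a tensor
    # spanning float indices [start, end] begins at byte offset start * 4 and
    # holds end - start + 1 values.
    return np.fromfile(path, dtype=np.float32, count=end - start + 1, offset=start * 4)

# RN50_conv1_weight is recorded as {0, 9407}: 64 * 3 * 7 * 7 = 9408 floats.
conv1 = load_range("ResNet50Weight.bin", 0, 9407).reshape(64, 3, 7, 7)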

486
CNNDemo/Resnet50.py Normal file

@@ -0,0 +1,486 @@
from __future__ import print_function
import os
import struct
import torch
import torchvision.models as models
CurrentPath = os.path.split(os.path.realpath(__file__))[0]+"/"
# Fetch the pretrained ResNet-50 once and work from a local copy.
resnet50 = models.resnet50(pretrained=True)
torch.save(resnet50, CurrentPath+'params.pth')
resnet50 = torch.load(CurrentPath+'params.pth')
print("===========================")
print("===========================")
print("===========================")
print(resnet50)
print("===========================")
print("===========================")
print("===========================")
# ss = resnet50.conv1.weight.cpu().detach().numpy().reshape(-1)
# ss = ss.tolist()
# strs = ''
# # for s in ss:
# # strs += str(s) + ","
# bs = struct.pack("f",1.0)
# f = open('data.hex', 'wb')
# f.write(bs)
# f.close()
# print(strs)
# ssa = ss.array()
ResNet50 = {
    "conv1": "Conv2d",
    "bn1": "BatchNorm2d",
    "relu": "ReLU",
    "maxpool": "MaxPool2d",
    # NOTE: only Bottleneck "0" of layer1 is described here, so the weights of
    # layer1 blocks 1 and 2 are never written to the dump.
    "layer1": {
        "_modules": {
            "0": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
                "downsample": {
                    "_modules": {
                        "0": "Conv2d",
                        "1": "BatchNorm2d",
                    }
                }
            }
        }
    },
    "layer2": {
        "_modules": {
            "0": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
                "downsample": {
                    "_modules": {
                        "0": "Conv2d",
                        "1": "BatchNorm2d",
                    }
                }
            },
            "1": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "2": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "3": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            }
        }
    },
    "layer3": {
        "_modules": {
            "0": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
                "downsample": {
                    "_modules": {
                        "0": "Conv2d",
                        "1": "BatchNorm2d",
                    }
                }
            },
            "1": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "2": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "3": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "4": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "5": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            }
        }
    },
    "layer4": {
        "_modules": {
            "0": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
                "downsample": {
                    "_modules": {
                        "0": "Conv2d",
                        "1": "BatchNorm2d",
                    }
                }
            },
            "1": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            },
            "2": {
                "conv1": "Conv2d",
                "bn1": "BatchNorm2d",
                "conv2": "Conv2d",
                "bn2": "BatchNorm2d",
                "conv3": "Conv2d",
                "bn3": "BatchNorm2d",
                "relu": "ReLU",
            }
        }
    },
    "avgpool": "AdaptiveAvgPool2d",
    "fc": "Linear"
}
# ResNet50Weight.cc receives the C offset arrays, ResNet50Weight.bin the raw float32 values.
weightfile = open(CurrentPath+'ResNet50Weight.cc', 'w')
binaryfile = open(CurrentPath+'ResNet50Weight.bin', 'wb')
# Count of float32 values written to the binary file so far (a float index, not a byte offset).
currentbyte = 0
def dump_tensor(name, tensor):
    # Append the tensor to ResNet50Weight.bin as flat float32 values and return a
    # C array recording the first and last float index it occupies in the file,
    # e.g. "int RN50_conv1_weight[]={0,9407,}".
    global currentbyte
    array = tensor.cpu().detach().numpy().reshape(-1)
    start = currentbyte
    for a in array:
        binaryfile.write(struct.pack("f", a))
        currentbyte = currentbyte + 1
    return "int " + name + "[]={" + str(start) + "," + str(currentbyte - 1) + ",}\n"

def printDict(d, head, obj):
    # Walk the ResNet50 description dict in parallel with the loaded model and
    # dump every Conv2d / BatchNorm2d / Linear tensor it names.
    strg = ""
    for item in d:
        # Sub-modules are reachable either as attributes (conv1, bn1, layer1, ...)
        # or as keys of an _modules OrderedDict ("0", "1", ...).
        objsub = getattr(obj, item, None)
        if objsub is None:
            objsub = obj[item]
        if isinstance(d[item], dict):
            strg += printDict(d[item], head + "_" + item, objsub)
        elif d[item] == "Conv2d":
            strg += dump_tensor(head + "_" + item + "_weight", objsub.weight)
        elif d[item] == "BatchNorm2d":
            # Only the running statistics are dumped; the affine weight (gamma) and
            # bias (beta) of the BatchNorm layers are not written.
            strg += dump_tensor(head + "_" + item + "_running_mean", objsub.running_mean)
            strg += dump_tensor(head + "_" + item + "_running_var", objsub.running_var)
        elif d[item] == "Linear":
            strg += dump_tensor(head + "_" + item + "_weight", objsub.weight)
            strg += dump_tensor(head + "_" + item + "_bias", objsub.bias)
    return strg
ss = printDict(ResNet50, "RN50", resnet50)
weightfile.write(ss)
binaryfile.close()
weightfile.close()
print(ss)
print("===========================")
print("===========================")
print("===========================")
# ResNet(
# (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
# (layer1): Sequential(
# (0): Bottleneck(
# (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (downsample): Sequential(
# (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): Bottleneck(
# (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (2): Bottleneck(
# (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# )
# (layer2): Sequential(
# (0): Bottleneck(
# (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (downsample): Sequential(
# (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): Bottleneck(
# (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (2): Bottleneck(
# (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (3): Bottleneck(
# (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# )
# (layer3): Sequential(
# (0): Bottleneck(
# (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (downsample): Sequential(
# (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): Bottleneck(
# (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (2): Bottleneck(
# (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (3): Bottleneck(
# (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (4): Bottleneck(
# (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (5): Bottleneck(
# (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# )
# (layer4): Sequential(
# (0): Bottleneck(
# (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (downsample): Sequential(
# (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): Bottleneck(
# (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# (2): Bottleneck(
# (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)
# (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# )
# )
# (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
# (fc): Linear(in_features=2048, out_features=1000, bias=True)
# )
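A quick round-trip check of the dump, not part of this commit, is to read a slice of the binary file back and compare it with the in-memory model; the snippet below is a sketch that assumes the script above has just run:

import numpy as np

# conv1.weight was written first and occupies float indices 0..9407 (9408 values).
dumped = np.fromfile(CurrentPath + 'ResNet50Weight.bin', dtype=np.float32, count=9408)
reference = resnet50.conv1.weight.cpu().detach().numpy().reshape(-1)
print("conv1 round-trip ok:", np.allclose(dumped, reference))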

BIN
CNNDemo/params.pth Normal file

Binary file not shown.

BIN
CNNDemo/resnet.png Normal file

Binary file not shown.

Size: 2.3 MiB