From e71d537042b9d0d2773c99520c010d24b9de1680 Mon Sep 17 00:00:00 2001
From: Chen Xin
Date: Fri, 17 Feb 2023 14:17:23 +0800
Subject: [PATCH] Fix deconv2d layout inference bug

Type: Bug fix

Signed-off-by: Chen Xin
---
 .../transform/ops/deconv2d_layout_inference.h | 155 +++++++++---------
 1 file changed, 78 insertions(+), 77 deletions(-)

diff --git a/src/tim/transform/ops/deconv2d_layout_inference.h b/src/tim/transform/ops/deconv2d_layout_inference.h
index 78ca49b..da65a80 100644
--- a/src/tim/transform/ops/deconv2d_layout_inference.h
+++ b/src/tim/transform/ops/deconv2d_layout_inference.h
@@ -40,92 +40,93 @@ class DeConv2dLayoutInfer : public OpLayoutInfer {
   void OnInputs(
       std::vector<std::shared_ptr<vx::Tensor>>& next_tensors) override {
+    auto src_deconv2d = std::static_pointer_cast<vx::ops::DeConv2d>(op_);
     vx::DataLayout layout = op_->impl()->layout_;
-    auto required_pv = MakeShared(4);
-    if (layout == vx::DataLayout::CWHN) {
-      required_pv = std::make_shared<PermuteVector<4>>(kCWHN2WHCN);
+    auto kernel_layout = src_deconv2d->KernelDataLayout();
+    std::shared_ptr<IPermuteVector> required_pv, weight_required_pv;
+    switch (layout)
+    {  // kernel layout must be IcWHOc in tflite & nnapi
+      case vx::DataLayout::CWHN:
+        required_pv = std::make_shared<PermuteVector<4>>(kCWHN2WHCN);
+        break;
+      case vx::DataLayout::WHCN:
+        required_pv = MakeShared(4);
+        break;
+      default:
+        VSILOGE("The layout of input is not supported.");
+        required_pv = MakeShared(4);
+        break;
+    }
+    switch (kernel_layout) {
+      case vx::DataLayout::OcIcWH:  // Support TVM Kernel Layout
+        weight_required_pv = std::make_shared<PermuteVector<4>>(kOcIcWH2WHIcOc);
+        break;
+      case vx::DataLayout::IcOcWH:
+        weight_required_pv = std::make_shared<PermuteVector<4>>(kIcOcWH2WHIcOc);
+        break;
+      case vx::DataLayout::IcWHOc:  // Support nnapi & tflite Kernel Layout
+        weight_required_pv = std::make_shared<PermuteVector<4>>(kIcWHOc2WHIcOc);
+        break;
+      default:  // Default set to IcWHOc for compatibility with previous APIs
+        weight_required_pv = std::make_shared<PermuteVector<4>>(kIcWHOc2WHIcOc);
+        break;
     }
-    auto src_inputs = op_->impl()->InputsTensor();
-    for (const auto& in : src_inputs) {
-      std::shared_ptr<vx::Tensor> infer_tensor;
-      std::shared_ptr<IPermuteVector> trans_pv;
-      if (in->IsConstTensor() &&
-          !(in->GetSpec().attr_ & vx::TensorAttribute::INPUT)) {
-        // For bias
-        if (in->GetShape().size() == 1) {
-          infer_tensor = context_->infer_graph_->CreateTensor(in->GetSpec(),
-                                                              in->GetDataRef());
-          trans_pv = MakeShared(1);
-        } else {
-          // For weight
-          if (!required_pv->IsAligned()) {
-            auto src_deconv2d =
-                std::static_pointer_cast<vx::ops::DeConv2d>(op_);
-            // Support TVM Kernel Layout
-            if (src_deconv2d->KernelDataLayout() == vx::DataLayout::OcIcWH) {
-              trans_pv = std::make_shared<PermuteVector<4>>(kOcIcWH2WHIcOc);
-              infer_tensor = PermuteConstTensor(in, trans_pv);
-            } else if (src_deconv2d->KernelDataLayout() ==
-                       vx::DataLayout::WHIcOc) {
-              infer_tensor = context_->infer_graph_->CreateTensor(
-                  in->GetSpec(), in->GetDataRef());
-              trans_pv = MakeShared(required_pv->Rank());
-            } else {
-              infer_tensor = PermuteConstTensor(in, required_pv);
-              trans_pv = required_pv;
-            }
-          } else {
-            infer_tensor = context_->infer_graph_->CreateTensor(
-                in->GetSpec(), in->GetDataRef());
-            trans_pv = MakeShared(required_pv->Rank());
-          }
-        }
+    auto input_tensors = op_->impl()->InputsTensor();
+    std::shared_ptr<vx::Tensor> infer_input, infer_weight, infer_bias;
+    // For input
+    auto input_pv = context_->GetPermuteVector(input_tensors[0]);
+    auto final_pv = input_pv->Reverse()->Add(required_pv);
+    if (!final_pv->IsAligned()) {
+      infer_input =
+          InsertPermute(context_->GetMapedTensor(input_tensors[0]), final_pv);
+      context_->SetPermuteVector(input_tensors[0], required_pv);
+    } else {
+      infer_input = context_->GetMapedTensor(input_tensors[0]);
+      context_->SetPermuteVector(input_tensors[0], input_pv);
+    }
+    context_->UpdateTensorMap(input_tensors[0], infer_input);
+
+    // For weight
+    if (input_tensors[1]->IsConstTensor()) {
+      if (!weight_required_pv->IsAligned()) {
+        infer_weight = PermuteConstTensor(input_tensors[1], weight_required_pv);
       } else {
-        // For bias
-        if (in->GetShape().size() == 1) {
-          infer_tensor = context_->GetMapedTensor(in);
-          trans_pv = MakeShared(1);
-        } else {
-          // For input/weight
-          auto pv = context_->GetPermuteVector(in);
-          auto final_pv = pv->Reverse()->Add(required_pv);
-          if (!final_pv->IsAligned()) {
-            infer_tensor =
-                InsertPermute(context_->GetMapedTensor(in), final_pv);
-            trans_pv = required_pv;
-          } else {
-            infer_tensor = context_->GetMapedTensor(in);
-            trans_pv = pv;
-          }
-        }
+        infer_weight = context_->infer_graph_->CreateTensor(
+            input_tensors[1]->GetSpec(), input_tensors[1]->GetDataRef());
       }
-      context_->UpdateTensorMap(in, infer_tensor);
-      context_->SetPermuteVector(in, trans_pv);
+      context_->SetPermuteVector(input_tensors[1], weight_required_pv);
+      context_->UpdateTensorMap(input_tensors[1], infer_weight);
+    } else {
+      auto weight_pv = context_->GetPermuteVector(input_tensors[1]);
+      auto final_pv = weight_pv->Reverse()->Add(weight_required_pv);
+      if (!final_pv->IsAligned()) {
+        infer_weight =
+            InsertPermute(context_->GetMapedTensor(input_tensors[1]), final_pv);
+        context_->SetPermuteVector(input_tensors[1], weight_required_pv);
+      } else {
+        infer_weight = context_->GetMapedTensor(input_tensors[1]);
+        context_->SetPermuteVector(input_tensors[1], weight_pv);
+      }
+      context_->UpdateTensorMap(input_tensors[1], infer_weight);
     }
-    auto pad_type =
-        TranslatePadType(op_->impl()->node()->nn_param.deconv.pad_type);
-    std::array<uint32_t, 2> ksize = {
-        op_->impl()->node()->nn_param.deconv.ksize[0],
-        op_->impl()->node()->nn_param.deconv.ksize[1]};
-    std::array<uint32_t, 2> stride = {
-        op_->impl()->node()->nn_param.deconv.stride[0],
-        op_->impl()->node()->nn_param.deconv.stride[1]};
-    std::array<uint32_t, 2> output_padding = {
-        op_->impl()->node()->nn_param.deconv.output_padding[0],
-        op_->impl()->node()->nn_param.deconv.output_padding[0]};
-    std::array<uint32_t, 4> pad = {op_->impl()->node()->nn_param.deconv.pad[0],
-                                   op_->impl()->node()->nn_param.deconv.pad[1],
-                                   op_->impl()->node()->nn_param.deconv.pad[2],
-                                   op_->impl()->node()->nn_param.deconv.pad[3]};
-    int32_t oc_count = op_->impl()->node()->nn_param.deconv.weights;
-    const uint32_t group = op_->impl()->node()->nn_param.deconv.group;
+    // For bias
+    if (input_tensors.size() == 3) {
+      if (input_tensors[2]->IsConstTensor()) {
+        infer_bias = context_->infer_graph_->CreateTensor(
+            input_tensors[2]->GetSpec(), input_tensors[2]->GetDataRef());
+      } else {
+        infer_bias = context_->GetMapedTensor(input_tensors[2]);
+      }
+      auto bias_pv = MakeShared(1);
+      context_->UpdateTensorMap(input_tensors[2], infer_bias);
+      context_->SetPermuteVector(input_tensors[2], bias_pv);
+    }
 
-    auto deconv = context_->infer_graph_->CreateOperation<vx::ops::DeConv2d>(
-        oc_count, pad_type, ksize, stride, output_padding, pad, group);
+    auto deconv = op_->Clone(context_->infer_graph_);
     auto infer_out = CreateOutputsTensor(required_pv);
-    for (const auto& i_src : src_inputs) {
+    for (const auto& i_src : input_tensors) {
       (*deconv).BindInput(context_->GetMapedTensor(i_src));
     }
     (*deconv).BindOutput(infer_out[0]);
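
Note on the approach (illustration only, not part of the patch): the rewritten OnInputs decides whether to insert a transpose by composing the inverse of the permutation already applied to a tensor with the permutation the operation requires (pv->Reverse()->Add(required_pv)), and only inserts one when the result is not aligned with the identity. The self-contained C++ sketch below shows that check. PermVec, Inverse, Compose, IsIdentity and the example permutation values are hypothetical stand-ins chosen for illustration; they are not the IPermuteVector API or the layout constants used in TIM-VX.

// Illustration only: hypothetical helpers standing in for TIM-VX's
// IPermuteVector::Reverse / Add / IsAligned, to show why a transpose is
// inserted only when the composed permutation is not the identity.
#include <array>
#include <cstdint>
#include <iostream>

// Convention used here: output dimension i of a transpose takes input
// dimension p[i] (numpy-style axes).
using PermVec = std::array<uint32_t, 4>;

// Inverse permutation: Inverse(p)[p[i]] == i.
PermVec Inverse(const PermVec& p) {
  PermVec r{};
  for (uint32_t i = 0; i < 4; ++i) r[p[i]] = i;
  return r;
}

// Compose(a, b): the single permutation equivalent to applying a, then b.
PermVec Compose(const PermVec& a, const PermVec& b) {
  PermVec c{};
  for (uint32_t i = 0; i < 4; ++i) c[i] = a[b[i]];
  return c;
}

bool IsIdentity(const PermVec& p) {
  for (uint32_t i = 0; i < 4; ++i) {
    if (p[i] != i) return false;
  }
  return true;
}

int main() {
  // Assumed example: a weight tensor produced in IcWHOc order while the
  // backend wants WHIcOc, which is {1, 2, 0, 3} under the convention above;
  // already_applied records what has been permuted so far (nothing yet).
  PermVec already_applied = {0, 1, 2, 3};
  PermVec required = {1, 2, 0, 3};

  // Mirrors pv->Reverse()->Add(required_pv) followed by IsAligned().
  PermVec final_pv = Compose(Inverse(already_applied), required);
  if (!IsIdentity(final_pv)) {
    std::cout << "insert transpose with perm {" << final_pv[0] << ", "
              << final_pv[1] << ", " << final_pv[2] << ", " << final_pv[3]
              << "}\n";
  } else {
    std::cout << "layouts already aligned, no transpose needed\n";
  }
  return 0;
}

Under this bookkeeping, a const weight can be permuted once at build time (PermuteConstTensor), while a non-const weight gets a permute node in the inferred graph, which is the split the weight branch of the patch makes.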