add docs for ops

parent 3f6d697cb8
commit a85fe89cf6
@@ -0,0 +1,3 @@
# Operators

{DOCS}
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
import sys
import os

# Paths are resolved relative to the repository root (the parent of this script's directory).
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
src_dir = root_dir + "/include/tim/vx/ops"
template_file = "/docs/Operators.md.template"
md_file = "/docs/Operators.md"


def iterfindfiles(path, fnexp):
    """Yield the names of files under `path` whose names end with `fnexp`."""
    for root, dirs, files in os.walk(path):
        for filename in files:
            if filename.endswith(fnexp):
                yield filename


def find_files_by_pattern(pattern, path='.'):
    """Return a sorted list of matching file names."""
    paths = []
    for filename in iterfindfiles(path, pattern):
        paths.append(filename)
    paths.sort()
    return paths


# Simple two-state scanner for /** ... */ doc blocks.
BLOCK_NONE = 0
BLOCK_START = 1


def get_md_blocks(file):
    """Extract the bodies of /** ... */ comment blocks, with the leading '* ' stripped."""
    blocks = []
    with open(file) as fhndl:
        lines = fhndl.readlines()
        lines_out = []
        status = BLOCK_NONE
        for line in lines:
            if status == BLOCK_NONE:
                if line.strip() == "/**":
                    status = BLOCK_START
            else:  # status == BLOCK_START
                if line.strip() == "*/":
                    status = BLOCK_NONE
                    blocks.append(lines_out.copy())
                    lines_out = []
                elif line.lstrip().startswith("*"):
                    if line.lstrip().startswith("*\n"):
                        lines_out.append("\n")
                    else:
                        lines_out.append(line.lstrip()[2:])
                else:
                    # Not a recognized doc-comment line; discard and resync.
                    status = BLOCK_NONE
                    lines_out = []
    return blocks


# Collect every doc block from the operator headers and sources.
all_blocks = []
for path in find_files_by_pattern(('.cpp', '.h'), path=src_dir):
    blocks = get_md_blocks(src_dir + "/" + path)
    all_blocks.extend(blocks)

with open(root_dir + template_file) as fhndl:
    lines = fhndl.readlines()

# Replace the {DOCS} placeholder line in the template with the extracted blocks,
# keeping a blank line after each block.
new_lines = lines.copy()
offset = 0
for index, line in enumerate(lines):
    if line.find("{DOCS}") != -1:
        del new_lines[index + offset]
        for block in all_blocks:
            for block_line in block:
                new_lines.insert(index + offset, block_line)
                offset += 1
            new_lines.insert(index + offset, '\n')
            offset += 1
        offset -= 1  # account for the deleted placeholder line

with open(root_dir + md_file, mode='w', newline='\n', encoding='UTF-8') as fhndl:
    fhndl.writelines(new_lines)

print(root_dir)
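For reference, the generator scans the headers under `include/tim/vx/ops` for comment blocks of the shape below (the operator name and parameter here are placeholders, not real ops); each block's body, with the leading `* ` stripped and a blank line appended, is spliced into `docs/Operators.md` in place of the `{DOCS}` placeholder:

```cpp
/**
 * ## SomeOp
 *
 * One-line description of what the operator computes.
 *
 * - some_param : what the parameter controls.
 */
```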
@@ -29,6 +29,39 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## Activation
 *
 * Activation functions:
 *
 * ```
 * Relu(x) : max(0, x)
 *
 * Relu1(x) : -1 if x <= -1; x if -1 < x < 1; 1 if x >= 1
 *
 * Relu6(x) : 0 if x <= 0; x if 0 < x < 6; 6 if x >= 6
 *
 * Elu(x) : x if x >= 0 else alpha*(e^x - 1)
 *
 * Tanh(x) : (1 - e^{-2x})/(1 + e^{-2x})
 *
 * Sigmoid(x) : 1/(1 + e^{-x})
 *
 * HardSwish(x) : 0 if x <= -3; x(x + 3)/6 if -3 < x < 3; x if x >= 3
 *
 * Mish(x) : x * tanh(softplus(x))
 *
 * HardSigmoid(x) : min(max(alpha*x + beta, 0), 1)
 *
 * SoftRelu(x) : log(1 + e^x). Also known as SoftPlus.
 *
 * LeakyRelu(x) : alpha * x if x <= 0; x if x > 0. alpha is a scalar.
 *
 * Prelu(x) : alpha * x if x <= 0; x if x > 0. alpha is a tensor.
 * - axis : Describes the axis of the inputs when coerced to 2D.
 * ```
 */

#define DECLARE_NO_PARAMETER_ACTIVATION(NAME) \
  class NAME : public Operation {             \
   public:                                    \
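A minimal usage sketch for one of the no-parameter activations, assuming the usual TIM-VX graph API (`Context::Create`, `Graph::CreateTensor`, `Graph::CreateOperation<Op>()`, `BindInputs`/`BindOutputs`, `Compile`, `Run`) and header paths inferred from this repository's layout; the tensor shape is illustrative only:

```cpp
#include "tim/vx/context.h"
#include "tim/vx/graph.h"
#include "tim/vx/ops/activations.h"

void BuildReluGraph() {
  auto context = tim::vx::Context::Create();
  auto graph = context->CreateGraph();

  tim::vx::ShapeType shape({1, 8});  // illustrative shape
  tim::vx::TensorSpec in_spec(tim::vx::DataType::FLOAT32, shape,
                              tim::vx::TensorAttribute::INPUT);
  tim::vx::TensorSpec out_spec(tim::vx::DataType::FLOAT32, shape,
                               tim::vx::TensorAttribute::OUTPUT);
  auto input = graph->CreateTensor(in_spec);
  auto output = graph->CreateTensor(out_spec);

  // Relu is declared via DECLARE_NO_PARAMETER_ACTIVATION, so it takes no extra arguments.
  auto relu = graph->CreateOperation<tim::vx::ops::Relu>();
  (*relu).BindInputs({input}).BindOutputs({output});

  graph->Compile();
  graph->Run();
}
```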
@@ -29,6 +29,14 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## AddN
 *
 * ```
 * AddN(x) : Input0 + Input1 + ... + InputN
 * ```
 */

class AddN : public Operation {
 public:
  AddN(Graph* graph, uint32_t num_inputs);
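A wiring sketch for AddN, reusing the graph/tensor setup pattern from the activation example above (tensor names and the input count are illustrative):

```cpp
// Three input tensors of identical shape, one output, all created as in the sketch above.
auto addn = graph->CreateOperation<tim::vx::ops::AddN>(/*num_inputs=*/3);
(*addn).BindInputs({in0, in1, in2}).BindOutputs({sum});
```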
@@ -29,6 +29,13 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## ArgMin/ArgMax
 *
 * Computes the indices of the **min/max** elements of the input tensor along the
 * provided **axis**. The type of the output tensor is integer.
 */

#define DECLARE_ARG_OP(NAME) \
  class Arg##NAME : public Operation { \
   public: \
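As a plain C++ reference for the 1-D case (the operator itself generalizes this along the chosen axis and runs on the device), a sketch:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Index of the largest element; ArgMin would use std::min_element instead.
int32_t ArgMaxRef(const std::vector<float>& values) {
  return static_cast<int32_t>(
      std::distance(values.begin(),
                    std::max_element(values.begin(), values.end())));
}
```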
@@ -32,6 +32,17 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## Batch2Space
 *
 * This operation reshapes the batch dimension (dimension 0) into M + 1 dimensions
 * of shape **block_size** + [batch], interleaves these blocks back into the grid
 * defined by the spatial dimensions [1, ..., M], to obtain a result with the same
 * rank as the input.
 *
 * - crop : crop the output tensor for ROI usage.
 */

class Batch2Space : public Operation {
 public:
  Batch2Space(Graph* graph, const std::vector<int>& block_size,
@@ -29,6 +29,16 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## BatchNorm
 *
 * Carries out batch normalization as described in the paper
 * https://arxiv.org/abs/1502.03167.
 *
 * $$\hat x_i\leftarrow \frac{x_i-\mu_\mathcal{B}}{\sqrt{\sigma_\mathcal{B}^2+\epsilon}}$$
 *
 * $$y_i=\gamma\hat x_i+\beta\equiv BN_{\gamma,\beta}(x_i)$$
 */

class BatchNorm : public Operation {
 public:
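A per-element C++ restatement of the two formulas above (a reference only, not the TIM-VX kernel):

```cpp
#include <cmath>

// y = gamma * (x - mean) / sqrt(var + eps) + beta
float BatchNormRef(float x, float mean, float variance,
                   float gamma, float beta, float eps) {
  const float x_hat = (x - mean) / std::sqrt(variance + eps);
  return gamma * x_hat + beta;
}
```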
@@ -30,6 +30,11 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## Clip
 *
 * Clip(x) : min if x <= min; x if min < x < max; max if x >= max
 */

class Clip : public Operation {
 public:
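Equivalently, in plain C++ (reference only):

```cpp
#include <algorithm>

// min if x <= min; max if x >= max; x otherwise.
float ClipRef(float x, float min, float max) {
  return std::clamp(x, min, max);
}
```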
@@ -29,6 +29,13 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## Concat
 *
 * Concatenate a list of tensors into a single tensor.
 * - axis : Which axis to concat on.
 */

class Concat : public Operation {
 public:
  Concat(Graph* graph, uint32_t axis, int input_cnt);
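A wiring sketch, again assuming the graph/tensor setup from the activation example (the axis value and tensors are illustrative):

```cpp
// Concatenate two tensors along axis 0; input_cnt must match the number of bound inputs.
auto concat = graph->CreateOperation<tim::vx::ops::Concat>(/*axis=*/0, /*input_cnt=*/2);
(*concat).BindInputs({a, b}).BindOutputs({joined});
```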
@@ -32,6 +32,23 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## Conv2d
 *
 * Performs a 2-D convolution operation, including classic Conv2D,
 * Depthwise Conv2D, Group Conv2D and Dilated Conv2D.
 *
 * - weights : the output channel count of the weight tensor.
 * - ksize : the height and width of the weight tensor.
 * - padding : AUTO, VALID or SAME.
 * - pad : pad value for each spatial axis.
 * - stride : stride along each spatial axis.
 * - dilation : dilation value along each spatial axis of the filter.
 * - multiplier : similar to the group attribute in other frameworks,
 *   but with a different value: multiplier = weights / group.
 * - layout : WHCN or CWHN.
 */

class Conv2d : public Operation {
 public:
  Conv2d(Graph* graph, int32_t weights, PadType padding,
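Since pad, stride and dilation together determine the spatial output size, here is a small hypothetical helper (not part of TIM-VX) showing the usual arithmetic for one spatial axis with explicit padding:

```cpp
#include <cstdint>

// Output extent of one spatial axis for a dilated convolution with explicit padding.
constexpr int32_t ConvOutDim(int32_t in, int32_t ksize, int32_t pad_front,
                             int32_t pad_back, int32_t stride, int32_t dilation) {
  const int32_t effective_k = (ksize - 1) * dilation + 1;  // dilated kernel extent
  return (in + pad_front + pad_back - effective_k) / stride + 1;
}

static_assert(ConvOutDim(224, 3, 1, 1, 2, 1) == 112, "3x3, stride 2, pad 1 halves 224");
```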
@@ -32,6 +32,23 @@ namespace tim {
namespace vx {
namespace ops {

/**
 * ## DeConv2d
 *
 * Performs the transpose of the 2-D convolution operation.
 *
 * This operation is sometimes called "deconvolution" after Deconvolutional Networks,
 * but it is actually the transpose (gradient) of Conv2D rather than an actual
 * deconvolution.
 *
 * - weights : the output channel count of the weight tensor.
 * - ksize : the height and width of the weight tensor.
 * - padding : AUTO, VALID or SAME.
 * - pad : pad value for each spatial axis.
 * - stride : stride along each spatial axis.
 * - output_padding : the amount of padding along the height and width of the
 *   output tensor.
 */

class DeConv2d : public Operation {
 public:
  DeConv2d(Graph* graph, int32_t oc_count_, PadType pad_type,
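And the matching hypothetical helper for the transposed case, showing how output_padding enters the output size (again a sketch of the usual arithmetic, not TIM-VX code):

```cpp
#include <cstdint>

// Output extent of one spatial axis for a transposed convolution with explicit padding.
constexpr int32_t DeConvOutDim(int32_t in, int32_t ksize, int32_t pad_front,
                               int32_t pad_back, int32_t stride,
                               int32_t output_padding) {
  return (in - 1) * stride + ksize - pad_front - pad_back + output_padding;
}

static_assert(DeConvOutDim(112, 3, 1, 1, 2, 1) == 224, "inverse of the Conv2d example");
```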