Update simulator SDK to 6.4.6.2

Signed-off-by: Kainan Cha <kainan.zha@verisilicon.com>
This commit is contained in:
Kainan Cha 2021-05-20 02:02:59 +08:00
parent baea9b827f
commit 8ab7759e3c
18 changed files with 799 additions and 248 deletions

View File

@ -1 +1 @@
D312513_A294074_R311680_T312233_O312045
6.4.6.2

View File

@ -379,6 +379,8 @@ VX_API_ENTRY vx_image VX_API_CALL vxCreateVirtualImage(vx_graph graph, vx_uint32
*/
VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromHandle(vx_context context, vx_df_image color, const vx_imagepatch_addressing_t addrs[], void *const ptrs[], vx_enum memory_type);
VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromHandleEx(vx_context context, vx_df_image color, const vx_imagepatch_addressing_t addrs[], vx_uint64 handles[], vx_enum memory_type);
/*! \brief Swaps the image handle of an image previously created from handle.
*
* This function sets the new image handle (i.e. pointer to all image planes)
@ -693,8 +695,6 @@ VX_API_ENTRY vx_image VX_API_CALL vxCreateImageFromChannel(vx_image img, vx_enum
*/
VX_API_ENTRY vx_status VX_API_CALL vxSetImageValidRectangle(vx_image image, const vx_rectangle_t *rect);
/*==============================================================================
KERNEL
=============================================================================*/

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2012-2020 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _OPENVX_IMPORT_H_
#define _OPENVX_IMPORT_H_
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief The OpenVX Import API
* part of the OpenVX Export and Import extension API
* and also part of the OpenVX SC deployment feature set.
*/
/*! \brief An enumeration of export uses. See <tt>\ref vxExportObjectsToMemory</tt> and
* <tt>\ref vxImportObjectsFromMemory</tt>
* \ingroup vx_enum_e
*/
#define VX_ENUM_IX_USE 0x18
/*! \brief How to export and import an object
* \ingroup group_import
*/
#define VX_IX_USE_APPLICATION_CREATE (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x0) /*!< \brief The application will create the object before import. */
/*! \brief How to export and import an object
* \ingroup group_import
*/
#define VX_IX_USE_EXPORT_VALUES (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x1) /*!< \brief Data values are exported and restored upon import. */
/*! \brief How to export and import an object
* \ingroup group_import
*/
#define VX_IX_USE_NO_EXPORT_VALUES (VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_IX_USE) + 0x2) /*!< \brief Data values are not exported. */
/*=============================================================================
IMPORT
=============================================================================*/
/*! \brief The Import Object. Import is a container of OpenVX objects, which may be retrieved
* by name
* \ingroup group_import
*/
typedef struct _vx_import *vx_import;
/*! \brief The Object Type Enumeration for import.
* \ingroup group_import
*/
#define VX_TYPE_IMPORT 0x814/*!< \brief A <tt>\ref vx_import</tt>. */
/*! \brief Imports objects into a context from a vendor-specific format in memory.\n
*
* \details This function imports objects from a memory blob previously created using <tt>\ref vxExportObjectsToMemory</tt>[*REQ*].\n
* A pointer to memory is given where a list of references is stored, together with the list
* of uses which describes how the references are used. The number of references given and the
* list of uses must match that given upon export, or this function will not be successful[*REQ*].\n
* The *uses* array specifies how the objects in the corresponding *refs* array will be imported:
* - <tt>\ref VX_IX_USE_APPLICATION_CREATE</tt>\n
* The application must create the object and supply the reference; the
* meta-data of the object must match exactly the meta-data of the object when it was exported,
* except that the name need not match[*REQ*].\n
* If the supplied reference has a different name to that stored, the supplied name is used[*REQ*].
* - <tt>\ref VX_IX_USE_EXPORT_VALUES</tt>\n
* The implementation will create the object and set the data in it[*REQ*].\n
* Any data not defined at the time of export of the object will be set to a default value (zero in the
* absence of any other definition) upon import[*REQ*].
* - <tt>\ref VX_IX_USE_NO_EXPORT_VALUES</tt>\n
* The implementation will create the object and the importing application will set values as applicable[*REQ*].
*
* References are obtained from the import API for those objects whose references were listed at the time of export.
* These are not the same objects; they are equivalent objects created by the framework at import time.
* The implementation guarantees that references will be available and valid for all objects listed at the time
* of export, or the import will fail[*REQ*].\n
* The import operation will fail if more than one object whose reference is listed at *refs*
* has been given the same non-zero length name (via <tt>\ref vxSetReferenceName</tt>)[*REQ*].\n
* The import will be unsuccessful if any of the parameters supplied is NULL[*REQ*].\n
* After completion of the function the memory at *ptr* may be deallocated by the application as it will
* not be used by any of the created objects[*REQ*].\n
* Any delays imported with graphs for which they are registered for auto-aging remain registered
* for auto-aging[*REQ*].\n
* After import, a graph must execute with exactly the same effect with respect to its visible parameters
* as before export[*REQ*].
* \note The *refs* array must be the correct length to hold all references of the import; this will be the same length
* that was supplied at the time of export. Only references for objects created by the application, where the
* corresponding *uses* entry is <tt>\ref VX_IX_USE_APPLICATION_CREATE</tt> should be filled in by the application;
* all other entries will be supplied by the framework and may be initialised by the application to NULL. The *uses* array
* must have the identical length and content as given at the time of export, and the value of *numrefs* must also match;
* these measures increase confidence that the import contains the correct data.
* \note Graph parameters may be changed after import by using the <tt>\ref vxSetGraphParameterByIndex</tt> API, and
* images may also be changed by using the <tt>\ref vxSwapImageHandle</tt> API.
* When <tt>\ref vxSetGraphParameterByIndex</tt> is used, the framework will check that the new parameter is of the
* correct type to run with the graph, which cannot be re-verified. If the reference supplied is not suitable, an error
* will be returned, but there may be circumstances where changing graph parameters for unsuitable ones is not detected
* and could lead to implementation-dependent behaviour; one such circumstance is when the new parameters are images
* corresponding to overlapping regions of interest. The user should avoid these circumstances.
* In other words,
* - The meta data of the new graph parameter must match the meta data of the graph parameter it replaces [*REQ*].
* - A graph parameter must not be NULL [*REQ*].
* \param [in] context context into which to import objects, must be valid [*REQ*].
* \param [in] numrefs number of references to import, must match export[*REQ*].
* \param [in,out] refs references imported or application-created data which must match
* meta-data of the export[*REQ*]
* \param [in] uses how to import the references, must match export values[*REQ*]
* \param [in] ptr pointer to binary buffer containing a valid binary export[*REQ*]
* \param [in] length number of bytes at \*ptr, i.e. the length of the export[*REQ*]
* \return A <tt>\ref vx_import</tt>[*REQ*].
* Calling <tt>\ref vxGetStatus</tt> with the vx_import as a parameter will return VX_SUCCESS if the
* function was successful[*REQ*].\n
* Another value is given to indicate that there was an error[*REQ*].\n
* An implementation may provide several different error codes to give useful diagnostic information
* in the event of failure to import objects, but these are not required to indicate
* possible recovery mechanisms, and for safety critical use assume errors are not recoverable.
* \post <tt>\ref vxReleaseImport</tt> is used to release the import object.
* \post Use <tt>\ref vxReleaseReference</tt> or an appropriate specific release function to release
* the references in the array refs when they are no longer required.
* \ingroup group_import
*/
VX_API_ENTRY vx_import VX_API_CALL vxImportObjectsFromMemory(
vx_context context,
vx_size numrefs,
vx_reference *refs,
const vx_enum * uses,
const vx_uint8 * ptr,
vx_size length);
/*! \brief Releases an import object when no longer required.\n
* \details This function releases the reference to the import object [*REQ*].\n
* Other objects including those imported at the time of creation of the import object are unaffected[*REQ*].\n
* \param [in,out] import The pointer to the reference to the import object[*REQ*].
* \post After returning successfully from this function the reference is zeroed[*REQ*].
* \return A <tt>\ref vx_status</tt> value.
* \retval VX_SUCCESS If no errors occurred and the import was successfully released[*REQ*].\n
* An error is indicated when the return value is not VX_SUCCESS[*REQ*].\n
* An implementation may provide several different return values to give useful diagnostic
* information in the event of failure to export, but these are not required to indicate
* possible recovery mechanisms, and for safety critical use assume errors are not recoverable.
* \pre <tt>\ref vxImportObjectsFromMemory</tt> is used to create an import object.
* \ingroup group_import
*/
VX_API_ENTRY vx_status VX_API_CALL vxReleaseImport(vx_import *import);
/*! \brief Get a reference from the import object by name.\n
*
* \details All accessible references of the import object created using <tt>\ref vxImportObjectsFromMemory</tt> are
* in the array *refs*, which is populated partly by the application before import, and partly by the
* framework. However, it may be more convenient to access the references in the import object without
* referring to this array, for example if the import object is passed as a parameter to another function.
* In this case, references may be retrieved by name, assuming that <tt>\ref vxSetReferenceName</tt>
* was called to assign a name to the reference.
* This function searches the given import for the given name and returns the associated reference[*REQ*].\n
* The reference may have been named either before export or after import[*REQ*].\n
* If more than one reference exists in the import with the given name, this is an error[*REQ*].\n
* Only references in the array *refs* after calling <tt>\ref vxImportObjectsFromMemory</tt> may be retrieved
* using this function[*REQ*].\n
* A reference to a named object may be obtained from a valid import object using this API even if all other
* references to the object have been released[*REQ*].
* \param [in] import The import object in which to find the name; the function will fail if this parameter
* is not valid[*REQ*].
* \param [in] name The name to find, points to a string of at least one and less than VX_MAX_REFERENCE_NAME bytes
* followed by a zero byte; the function will fail if this is not valid[*REQ*].
* \return A <tt>\ref vx_reference</tt>[*REQ*].\n
* Calling <tt>\ref vxGetStatus</tt> with the reference as a parameter will return VX_SUCCESS if the function
* was successful[*REQ*].\n
* Another value is given to indicate that there was an error[*REQ*].\n
* On success, the reference count of the object in question is incremented[*REQ*].\n
* An implementation may provide several different error codes to give useful diagnostic information
* in the event of failure to retrieve a reference, but these are not required to indicate
* possible recovery mechanisms, and for safety critical use assume errors are not recoverable.
* \pre <tt>\ref vxSetReferenceName</tt> was used to name the reference.
* \post use <tt>\ref vxReleaseReference</tt> or appropriate specific release function to release a reference
* obtained by this method.
* \ingroup group_import
*/
VX_API_ENTRY vx_reference VX_API_CALL vxGetImportReferenceByName(vx_import import, const vx_char *name);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -488,6 +488,12 @@ enum vx_kernel_e {
VX_KERNEL_NN_L2NORMALIZE_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2B,
VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_ADD_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2C,
VX_KERNEL_NN_LUT_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2D,
VX_KERNEL_NN_CONVOLUTION_RELU_POOLING_MULTIPLY_LAYER2 = VX_KERNEL_BASE(VX_ID_VIVANTE, VX_LIBRARY_KHR_BASE) + 0x2E,
VX_KERNEL_MAX_1_2, /*!< \internal Used for VX1.2 bounds checking in the conformance test. */
};

View File

@ -72,4 +72,44 @@
*/
#define VX_ACTIVATION_EXT_SUPPORT 1
/*
VX_HARDWARE_CAPS_PARAMS_EXT_SUPPORT is used to query more hardware parameters such as shader sub-group size.
[value]
0: not support
1: support
*/
#define VX_HARDWARE_CAPS_PARAMS_EXT_SUPPORT 1
/*
VX_USER_LOOKUP_TABLE_SUPPORT is used to declare that openvx can support user lookuptable.
[value]
0: not support
1: support
*/
#define VX_USER_LOOKUP_TABLE_SUPPORT 1
/*
VX_PRELOAD_CONST_TENSOR_SUPPORT is used to declare that openvx can support preload weight/bias and const tensor
[value]
0: not support
1: support(NN conv and TP FC weightbias, and SH const tensor)
*/
#define VX_PRELOAD_CONST_TENSOR_SUPPORT 1
/*
VX_CREATE_TENSOR_SUPPORT_PHYSICAL is used to declare that openvx can support physical address for vxCreateTensorFromHandle
[value]
0: not support
1: support
*/
#define VX_CREATE_TENSOR_SUPPORT_PHYSICAL 1
/*
VX_GRAPH_PREEMPTION_SUPPORT is used to declare that openvx can support different graph preemption function.
[value]
0: not support
1: support
*/
#define VX_GRAPH_PREEMPTION_SUPPORT 1
#endif /* __VX_KHR_COMPATIBLE_H__ */

View File

@ -0,0 +1,158 @@
/*
* Copyright (c) 2012-2020 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _OPENVX_IMPORT_EXPORT_H_
#define _OPENVX_IMPORT_EXPORT_H_
/*!
* \file
* \brief The OpenVX Export and Import extension API.
*/
#define OPENVX_KHR_IX "vx_khr_ix"
#include <VX/vx_import.h>
#ifdef __cplusplus
extern "C" {
#endif
/*=============================================================================
Export to host memory
=============================================================================*/
/*! \brief Exports selected objects to memory in a vendor-specific format.\n
*
* \details A list of references in the given context is supplied to this function, and all information
* required to re-create these is stored in memory in such a way that those objects may be re-created
* with the corresponding import function, according to the usage specified by the *uses* parameter[*REQ*].\n
* The information must be context independent in that it may be written to external storage for later
* retrieval with another instantiation of a compatible implementation[*REQ*].\n
* The list of objects to export may contain only valid references (i.e. vxGetStatus() will return VX_SUCCESS)
* to vx_graph and non-virtual data objects or the function will fail[*REQ*].
* (Specifically not vx_context, vx_import, vx_node, vx_kernel, vx_parameter or vx_meta_format)\n
* Some node creation functions take C parameters rather than OpenVX data objects (such as the *gradient_size*
* parameter of <tt>\ref vxHarrisCornersNode</tt> that is provided as a vx_int32), because these are intended
* to be fixed at node creation time; nevertheless OpenVX data objects may be assigned to them, for example if
* the <tt>\ref vxCreateGenericNode</tt> API is used.
* A data object corresponding to a node parameter that is intended to be fixed at node creation time must not be
* in the list of exported objects nor attached as a graph parameter or the export operation will fail[*REQ*].\n
* The *uses* array specifies how the objects in the corresponding *refs* array will be exported. A data object
* will always have its meta-data (e.g. dimensions and format of an image) exported, and optionally
* may have its data (e.g. pixel values) exported, and additionally you can decide whether the importing
* application will create data objects to replace those attached to graphs, or if the implementation will
* automatically create them:
* - <tt>\ref VX_IX_USE_APPLICATION_CREATE</tt> \n
* Export sufficient data to check that an application-supplied
* object is compatible when the data is later imported[*REQ*].
* \note This value must be given for images created from handles, or the export operation
* will fail[*REQ*]
* - <tt>\ref VX_IX_USE_EXPORT_VALUES</tt>\n
* Export complete information (for example image data or value of a
* scalar)[*REQ*].
* - <tt>\ref VX_IX_USE_NO_EXPORT_VALUES</tt>\n
* Export meta-data only; the importing application will set values
* as applicable[*REQ*]
*
* The values in *uses* are applicable only for data objects and are ignored for vx_graph objects[*REQ*].\n
* If the list *refs* contains vx_graph objects, these graphs will be verified during the export operation and the export operation will fail if verification fails; when successfully exported graphs are subsequently imported they will appear as verified [*REQ*].\n
* \note The implementation may also choose to re-verify any previously verified graphs and apply
* optimisations based upon which references are to be exported and how.\n
* Any data objects attached to a graph that are hidden, i.e. their references are not in the list *refs*,
* may be treated by the implementation as virtual objects, since they can never be visible when the graph is
* subsequently imported.\n
* Note that imported graphs cannot become unverified. Attempts to change the
* graph that might normally cause the graph to be unverified, e.g. calling
* vxSetGraphParameterByIndex with an object with different metadata, will fail.\n
* The implementation should make sure that all permissible changes of exported objects are possible
* without re-verification. For example:
* - A uniform image may be swapped for a non-uniform image, so corresponding optimisations should be
* inhibited if a uniform image appears in the *refs* list
* - An image that is a region of interest of another image may be similarly replaced by any other image of
* matching size and format, and vice-versa
*
* If a graph is exported that has delays registered for auto-aging, then this information is also
* exported[*REQ*].\n
* If the function is called with NULL for any of its parameters, this is an error [*REQ*].\n
* The reference counts of objects as visible to the calling application will not be affected
* by calling this function [*REQ*].\n
* The export operation will fail if more than one object whose reference is listed at *refs*
* has been given the same non-zero length name (via <tt>\ref vxSetReferenceName</tt>)[*REQ*].\n
* If a graph listed for export has any graph parameters not listed at *refs*, then the
* export operation will fail[*REQ*].
* \note The order of the references supplied in the *refs* array will be the order in which the
* framework will supply references for the corresponding import operation with <tt>\ref vxImportObjectsFromMemory</tt>.\n
* The same length of *uses* array, containing the same values, and the same value of *numrefs*, must be supplied
* for the corresponding import operation.
*
* For objects not listed in *refs*, the following rules apply:
* 1. In any one graph, if an object is not connected as an output of a node in a graph being exported
* then its data values will be exported (for subsequent import)[*REQ*].
* 2. Where the object in (1) is a composite object (such as a pyramid) then rule (1) applies to
* all of its sub-objects[*REQ*].
* 3. Where the object in (1) is a sub-object such as a region of interest, and the composite object
* (in this case the parent image) does not meet the conditions of rule (1), then rule (1) applies
* to the sub-object only[*REQ*].
* \param [in] context context from which to export objects, must be valid [*REQ*].
* \param [in] numrefs number of references to export [*REQ*].
* \param [in] refs references to export. This is an array of length numrefs populated with
* the references to export[*REQ*].
* \param [in] uses how to export the references. This is an array of length numrefs containing
* values as described above[*REQ*].
* \param [out] ptr returns pointer to binary buffer. On error this is set to NULL[*REQ*].
* \param [out] length number of bytes at \*ptr. On error this is set to zero[*REQ*].
* \return A <tt>\ref vx_status</tt> value.
* \retval VX_SUCCESS If no errors occurred and the objects were successfully exported[*REQ*].
* An error is indicated when the return value is not VX_SUCCESS.\n
* An implementation may provide several different return values to give useful diagnostic
* information in the event of failure to export, but these are not required to indicate
* possible recovery mechanisms, and for safety critical use assume errors are not recoverable.
* \post <tt>\ref vxReleaseExportedMemory</tt> is used to deallocate the memory.
* \ingroup group_import
*/
VX_API_ENTRY vx_status VX_API_CALL vxExportObjectsToMemory(
vx_context context,
vx_size numrefs,
const vx_reference *refs,
const vx_enum * uses,
const vx_uint8 ** ptr,
vx_size * length);
/*! \brief Releases memory allocated for a binary export when it is no longer required.
* \details This function releases memory allocated by <tt>\ref vxExportObjectsToMemory</tt>[*REQ*].
* \param [in] context The context for which <tt>\ref vxExportObjectsToMemory</tt> was called[*REQ*].
* \param [in,out] ptr A pointer previously set by calling <tt>\ref vxExportObjectsToMemory</tt>[*REQ*].
* The function will fail if <code>*ptr</code> does not contain an address of memory previously
* allocated by <tt>\ref vxExportObjectsToMemory</tt>[*REQ*].
* \post After returning successfully from this function \*ptr is set to NULL[*REQ*].
* \return A <tt>\ref vx_status</tt> value.
* \retval VX_SUCCESS If no errors occurred and the memory was successfully released[*REQ*].\n
* An error is indicated when the return value is not VX_SUCCESS[*REQ*].\n
* An implementation may provide several different return values to give useful diagnostic
* information in the event of failure to export, but these are not required to indicate
* possible recovery mechanisms, and for safety critical use assume errors are not recoverable.
* \pre <tt>\ref vxExportObjectsToMemory</tt> is used to allocate the memory.
* \ingroup group_import
*/
VX_API_ENTRY vx_status VX_API_CALL vxReleaseExportedMemory(
vx_context context, const vx_uint8 ** ptr);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (c) 2012-2017 The Khronos Group Inc.
*
@ -48,7 +48,14 @@ enum vx_context_attribute_internal_type_e
enum vx_graph_attribute_internal_type_e
{
/*! \brief Queries a graph for its device index (read-write. Use a <tt>\ref vx_uint32</tt> parameter. */
VX_GRAPH_DEVICE_INDEX_VIV = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x0,
/*! \brief Queries a graph for its weight data pre-loading size in vip sram (read-write. Use a <tt>\ref vx_uint32</tt> parameter. */
VX_GRAPH_VIP_SRAM_PRE_LOAD = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x1,
/*! \brief Queries a graph for its weight data pre-loading size in axi sram (read-write. Use a <tt>\ref vx_uint32</tt> parameter. */
VX_GRAPH_AXI_SRAM_PRE_LOAD = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x2,
/*! \brief Queries a graph for its running priority (read-write. Use a <tt>\ref vx_uint32</tt> parameter. */
VX_GRAPH_PRIORITY_VALUE_VIV = VX_ATTRIBUTE_BASE(VX_ID_VIVANTE, VX_TYPE_GRAPH) + 0x3,
};
/*! \brief Size Alignment of User Memory
@ -66,7 +73,7 @@ CONVOLUTIONAL_NETWORK structs and enums
/*! \brief The Neural Network Extension Library Set
* \ingroup group_cnn
*/
#define VX_LIBRARY_KHR_NN_EXTENSION (0x1)
#define VX_LIBRARY_KHR_NN_EXTENSION (0x1)
/*! \brief The list of Neural Network Extension Kernels.
* \ingroup group_cnn
@ -199,12 +206,12 @@ enum vx_nn_activation_function_e
VX_NN_ACTIVATION_RELU1 = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x2,
VX_NN_ACTIVATION_RSQRT = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x3,
VX_NN_ACTIVATION_LEAKYRELU_MAX_POOLING = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x4,
VX_NN_ACTIVATION_NONE = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x5,
VX_NN_ACTIVATION_SWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x6,
VX_NN_ACTIVATION_HSWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x7,
VX_NN_ACTIVATION_SWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x5,
VX_NN_ACTIVATION_HSWISH = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x6,
VX_NN_ACTIVATION_NONE = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_NN_ACTIVATION_FUNCTION_TYPE) + 0x7,
};
/*! \brief The Convolutional network type
/*! \brief The Convolutional network type
* \ingroup group_cnn
*/
enum vx_nn_layer_type_e
@ -253,13 +260,12 @@ enum vx_quantized_format_e
{
/*! \brief Non-quantized data. */
VX_QUANT_NONE = 0x0,
/*! \brief A quantization data type which specifies the fixed point position. */
/*! \brief A quantization data type which specifies the fixed point position for whole tensor. */
VX_QUANT_DYNAMIC_FIXED_POINT = 0x1,
/*! \brief A quantization data type which has scale value and zero point to match with TF and Android NN API */
/*! \brief A quantization data type which has scale value and zero point to match with TF and Android NN API for whole tensor. */
VX_QUANT_AFFINE_SCALE = 0x2,
/*! \brief A quantization data type which has scale value and zero point to match with TF and Android NN API for per channel of tensor. */
VX_QUANT_AFFINE_SCALE_PER_CHANNEL = 0x3,
};
/*! \brief The rank mode of tensor memory.
@ -308,20 +314,6 @@ enum vx_tensor_lifetime_type_e
TENSOR DATA FUNCTIONS
=============================================================================*/
/*! \brief The multi dimensional view data structure.
* \details Used to split tensors into several views. Or concatenate several view into one tensor.
* \see vxCreateTensorFromView
* \ingroup group_tensor
*/
typedef struct _vx_tensor_view_t * vx_tensor_view;
/*! \brief The addressing of a tensor view patch structure is used by the Host only
* to address elements in a tensor view patch.
* \see <tt>\ref vxCopyTensorPatch</tt>
* \ingroup group_tensor
*/
typedef struct _vx_tensor_addressing_t * vx_tensor_addressing;
/*! \brief Create an opaque reference to a tensor view object.
* \details Not guaranteed to exist until the <tt>vx_graph</tt> containing it has been verified.
* \param [in] context The reference to the implementation context.
@ -368,32 +360,12 @@ VX_API_ENTRY vx_tensor_addressing VX_API_CALL vxCreateTensorAddressing(vx_contex
*/
VX_API_ENTRY vx_status VX_API_CALL vxReleaseTensorAddressing(vx_tensor_addressing *tensor_addr);
/* vxCopyTensorPatchForNN11 is for back compatibility with spec 1.1, which is used in nn*/
VX_API_ENTRY vx_status VX_API_CALL vxCopyTensorPatchForNN11(
vx_tensor tensor,
vx_tensor_view view,
vx_tensor_addressing user_addr,
void *user_ptr,
vx_enum usage,
vx_enum user_mem_type
);
/* vxCreateTensorForNN11 is for back compatibility with spec 1.1, which is used in nn*/
VX_API_ENTRY vx_tensor VX_API_CALL
vxCreateTensorForNN11(
vx_context context,
vx_uint32 num_of_dims,
vx_uint32 *sizes,
vx_enum data_format,
vx_int8 fixed_point_pos
);
/*! \brief Creates an array of tensors
* \param [in] context The reference to the overall Context.
* \param [in] count Number of Objects to create in the ObjectArray.
* \param [in] tensor* The tensors array that need add to the ObjectArray.
* \param [in] tensor* The tensors array that need add to the ObjectArray.
*
* \returns An ObjectArray reference <tt>\ref vx_object_array</tt>. Any possible errors preventing a
* \returns An ObjectArray reference <tt>\ref vx_object_array</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>. Data objects are not initialized by this function.
*
* \ingroup group_object_array
@ -402,18 +374,18 @@ VX_API_ENTRY vx_object_array VX_API_CALL vxCreateTensorObjectArray(vx_context co
typedef union _vx_tensor_quant_param
{
struct
struct
{
vx_int8 fixed_point_pos; /*!< \brief Specifies the fixed point position when the input element type is int16/int8, if 0 calculations are performed in integer math */
} dfp;
struct
struct
{
vx_float32 scale; /*!< \brief Scale value for the quantized value */
vx_int32 zeroPoint; /*!< \brief A 32 bit integer, in range [0, 255] */
} affine;
struct
struct
{
vx_uint32 channelDim; /*!< \brief a 32 bit unsigned integer indicating channel dimension */
vx_uint32 scaleCount; /*!< \brief the size of the scale array, must be equal to size[channelDim] */
@ -468,11 +440,18 @@ VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensor2(vx_context context, const vx_
*/
VX_API_ENTRY vx_tensor VX_API_CALL vxCreateVirtualTensor2(vx_graph graph, const vx_tensor_create_params_t* tensor_create_params, vx_size size_of_create_params);
/*! \brief Swaps the tensor created from handle.
*\details This function swap tensors logical and physical address.
*\these tensors must have the same proterties expect memory related content.
*\Attention: APP should make sure the cache and memory cohensive for the first call vxSwapTensor
*\version 0.4
/*! \brief Swap tensor handle between two tensors which are created from handle.
* \details These tensors must have the same attributes except for the tensor handle.
* for better performance, must make sure the memory referenced by the tensor handle is flushed by using <tt>\ref vxFlushHandle</tt>.
* \param [in] tensor0 The tensor whose handle will be changed to tensor1's.
* \param [in] tensor1 The tensor whose handle will be changed to tensor0's.
* \return A <tt>\ref vx_status_e</tt> enumeration.
* \retval VX_SUCCESS No errors.
* \retval VX_ERROR_INVALID_REFERENCE tensor is not a valid <tt>\ref vx_tensor</tt> reference.
* reference.
* \retval VX_ERROR_INVALID_REFERENCE The tensor0 and tensor1's attributes are not the same.
* \ingroup group_tensor
*\version 0.5
*/
VX_API_ENTRY vx_status VX_API_CALL vxSwapTensor(vx_tensor tensor0, vx_tensor tensor1);
@ -480,26 +459,26 @@ VX_API_ENTRY vx_status VX_API_CALL vxSwapTensor(vx_tensor tensor0, vx_tensor ten
* \param [in] context The reference to the implementation context.
* \param [in] tensor_create_params The <tt>\ref vx_tensor_create_params_t</tt> that points to a parameter structure.
* \param [in] size_of_create_params Size of parameter structure.
* \param [in] addrs The tensor patch addressing structures that define the dimension and stride of pointers. See note below.
* \param [in] addrs The tensor patch addressing structures that define the dimension and stride of pointers. See note below.
* \param [in] ptr The logical pointer of platform-defined references to tensor data.
* \param [in] import_type <tt>\ref vx_memory_type_e</tt>. When giving <tt>\ref VX_MEMORY_TYPE_HOST</tt>
* the \a ptr is assumed to be a HOST accessible pointer to memory.
* \returns An tensor reference <tt>\ref vx_tensor</tt>. Any possible errors preventing a
* \returns An tensor reference <tt>\ref vx_tensor</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
*
 * In order to release the tensor back to the application we should use <tt>\ref vxSwapTensorHandle</tt>.
*
*
* \ingroup group_tensor
*\version 0.4
*/
VX_API_ENTRY vx_tensor VX_API_CALL vxCreateTensorFromHandle2(
    vx_context context, const vx_tensor_create_params_t* tensor_create_params, vx_size size_of_create_params, const vx_tensor_addressing addrs,
    void * const ptr, vx_enum import_type);
/*
 *\ vxo_flushHandle is used to support vxo_createTensorFromHandle/vxo_createImageFromHandle.
 *\ Once the application changes the content of a tensor/image, it can call vxo_flushHandle to keep the cache coherent and get better performance;
 *\ otherwise the driver will handle it by default, with worse performance.
/*! \brief Flushes the memory referenced by the reference's handle when it is ready.
 * \param [in] ref The reference (image or tensor) which was created from handle.
 * \return A <tt>\ref vx_status_e</tt> enumeration.
 * \retval VX_ERROR_INVALID_REFERENCE ref is not a valid <tt>\ref vx_tensor</tt> or <tt>\ref vx_image</tt> reference created from handle.
 */
VX_API_ENTRY vx_status VX_API_CALL vxFlushHandle(vx_reference ref);
@ -559,7 +538,7 @@ typedef struct _vx_nn_convolution_params_t
typedef struct _vx_nn_convolution_params_ext_t
{
vx_nn_convolution_params_t khr; /*!< \brief Khronos standard structure head */
vx_size padding_x_right; /*!< \brief Number of elements added at each side in the right of x dimension of the input,
"padding_x" is for the left */
vx_size padding_y_bottom; /*!< \brief Number of elements added at each side in the bottom of y dimension of the input.
"padding_y" is for the top */
@ -648,7 +627,7 @@ typedef struct _vx_nn_convolution_params_ext2_t
* The relation between input to output is as follows: \n
* \f$ width_{output} = round(\frac{(width_{input} + 2 * padding_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n
* and \n
* \f$ height_{output} = round(\frac{(height + 2 * padding_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n
* \f$ height_{output} = round(\frac{(height + 2 * padding_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n
* where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension.
* \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension.
* \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions.
@ -657,11 +636,11 @@ typedef struct _vx_nn_convolution_params_ext2_t
* Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
* The dimension order is [width, height, #IFM, #batches].\n
* \param [in] weights [*static] Weights are 4d tensor with dimensions [kernel_x, kernel_y, #IFM, #OFM]. see <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt> \n Weights data type must match the data type of the inputs. (Kernel parameter #1)
* The dimension order is [width, height, #IFM, #batches].\n
* \param [in] weights [*static] Weights are 4d tensor with dimensions [kernel_x, kernel_y, #IFM, #OFM]. see <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt> \n Weights data type must match the data type of the inputs. (Kernel parameter #1)
* \param [in] biases [*static] Optional, ignored if NULL. The biases, which may be shared (one per ofm) or unshared (one per ofm * output location). The possible layouts are
* either [#OFM] or [width, height, #OFM]. Biases data type must match the data type of the inputs.
* \param [in] convolution_params [static] Pointer to parameters of type <tt>\ref vx_nn_convolution_params_t</tt>.
* either [#OFM] or [width, height, #OFM]. Biases data type must match the data type of the inputs.
* \param [in] convolution_params [static] Pointer to parameters of type <tt>\ref vx_nn_convolution_params_t</tt>.
* \param [in] size_of_convolution_params [static] Size in bytes of convolution_params. Note that this parameter is not counted as one of the kernel parameters.
* \param [out] outputs The output tensor data. Output will have the same number and structure of dimensions as input. Output tensor data type must be same as the inputs.
* \return <tt> vx_node</tt>.
@ -677,8 +656,8 @@ VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer(vx_graph graph, vx_tensor in
* round: rounding according the <tt>vx_round_policy_e</tt> enumeration. \n
* saturate: A saturation according the <tt>vx_convert_policy_e</tt> enumeration.
* The saturation is done based on the accumulator_bits parameter.
* According the accumulator_bits, the saturation might not be performed every operation.
* But every a specified amount of operations,
* According the accumulator_bits, the saturation might not be performed every operation.
* But every a specified amount of operations,
* that are suspected to saturate the accumulation bits\n
* The equation for Fully connected layer:\n
* \f$ outputs[i] = ( \sum_{j} saturate(round(inputs[j] \times weights[j,i])))+biasses[i] \f$\n
@ -687,17 +666,17 @@ VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer(vx_graph graph, vx_tensor in
* Then down scale is done by picking the results according to a skip jump. The skip is determined by the output size dimensions.
* The relation between input to output is as follows:
* \f$ size_{output} = round(\frac{(size_{input} + 2 * pad)}{skip} + 1) \f$\n
* where \f$size_{input}\f$ is the size of the input dimension.
* \f$size_{output}\f$ is the size of the output dimension.
* where \f$size_{input}\f$ is the size of the input dimension.
* \f$size_{output}\f$ is the size of the output dimension.
* skip is calculated by the relation between input and output.
* rounding is done according to <tt>\ref vx_convolutional_network_rounding_type_e</tt>.
* rounding is done according to <tt>\ref vx_convolutional_network_rounding_type_e</tt>.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. There two possible input layouts:
* 1. [#IFM, #batches]. See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* 2. [width, height, #IFM, #batches]. See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>\n
* 1. [#IFM, #batches]. See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* 2. [width, height, #IFM, #batches]. See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>\n
* In both cases number of batches are optional and may be multidimensional.
* The second option is a special case to deal with convolution layer followed by fully connected.
* The dimension order is [#IFM, #batches]. See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>. Note that batch may be multidimensional.
* The dimension order is [#IFM, #batches]. See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>. Note that batch may be multidimensional.
* \param [in] weights [*static] Number of dimensions equals dim(single input)+1. Single input dims are [width, height, #IFM], with height and #IFM being optional.\n
* \param [in] biases [*static]The biases, which may be shared (one per ofm) or unshared (one per ofm * output location).
* \param [in] pad [static] Number of elements added at each side in the input.
@ -705,7 +684,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer(vx_graph graph, vx_tensor in
* \param [in] overflow_policy [static] A <tt> VX_TYPE_ENUM</tt> of the <tt> vx_convert_policy_e</tt> enumeration.
* \param [in] rounding_policy [static] A <tt> VX_TYPE_ENUM</tt> of the <tt> vx_round_policy_e</tt> enumeration.
* \param [in] down_scale_size_rounding [static] Rounding method for calculating output dimensions. See <tt>\ref vx_convolutional_network_rounding_type_e</tt>
* \param [out] outputs The output tensor data. Output dimension layout is [#OFM,#batches]. See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>, where #batches may be multidimensional.
* \param [out] outputs The output tensor data. Output dimension layout is [#OFM,#batches]. See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>, where #batches may be multidimensional.
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
@ -713,84 +692,12 @@ VX_API_ENTRY vx_node VX_API_CALL vxConvolutionLayer(vx_graph graph, vx_tensor in
*/
VX_API_ENTRY vx_node VX_API_CALL vxFullyConnectedLayer(vx_graph graph, vx_tensor inputs, vx_tensor weights, vx_tensor biases, vx_enum overflow_policy, vx_enum rounding_policy, vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Pooling Layer Node.
 * \details Pooling is done on the first 2 dimensions of the <tt>\ref vx_tensor</tt>. Therefore, we use here the term x for the first dimension and y for the second.
* Pooling operation is a function operation over a rectangle size and then a nearest neighbour down scale.
* Here we use pool_size_x and pool_size_y to specify the rectangle size on which the operation
* is performed. \n
* before the operation is done (average or maximum value). the data is padded in the first 2D with zeros.
* The down scale is done by picking the results according to a skip jump. The skip in the x and y dimension is determined by the output size dimensions.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>
* \param [in] pool_type [static] Either max pooling or average pooling (see <tt>\ref vx_convolutional_network_pooling_type_e</tt>).
* \param [in] pool_size_x [static] Size of the pooling region in the x dimension
* \param [in] pool_size_y [static] Size of the pooling region in the y dimension.
* \param [in] pool_pad_x [static] Padding size in the x dimension.
* \param [in] pool_pad_y [static] Padding size in the y dimension.
* \param [in] rounding [static] The rounding method for calculating output dimensions. See <tt>\ref vx_convolutional_network_rounding_type_e</tt>
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer(vx_graph graph, vx_tensor inputs, vx_enum pooling_type,
vx_size pooling_size_x,
vx_size pooling_size_y,
vx_size pooling_padding_x,
vx_size pooling_padding_y,
vx_enum rounding,
vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Softmax Layer Node.
* \details the softmax function, is a generalization of the logistic function that "squashes" a K-dimensional vector \f$ z \f$ of arbitrary real values to a K-dimensional vector
* \f$ \sigma(z) \f$ of real values in the range (0, 1) that add up to 1. The function is given by:
* \f$ \sigma(z) = \frac{\exp^z}{\sum_i \exp^{z_i}} \f$
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor, with the number of dimensions according to the following scheme.
* In case IFM dimension is 1. Softmax is be calculated on that dimension.
* In case IFM dimension is 2. Softmax is be calculated on the first dimension. The second dimension is batching.
* In case IFM dimension is 3. Dimensions are [Width, Height, Classes]. And Softmax is calculated on the third dimension.
* In case IFM dimension is 4. Dimensions are [Width, Height, Classes, batching]. Softmax is calculated on the third dimension.
* Regarding the layout specification, see <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* \param [out] outputs The output tensor. Output will have the same number of dimensions as input. Output tensor data type must be same as the inputs.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
*/
VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Normalization Layer Node.
* \details Normalizing over local input regions. Each input value is divided by \f$ (1+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$ , where n is the number of elements to normalize across.
* and the sum is taken over the region centred at that value (zero padding is added where necessary).
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* \param [in] type [static] Either same map or across maps (see vx_convolutional_network_norm_type_e).
* \param [in] norm_size [static] Number of elements to normalize across.
* \param [in] alpha [static] Alpha parameter in the normalization equation.
* \param [in] beta [static ] Beta parameter in the normalization equation.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxNormalizationLayer(vx_graph graph, vx_tensor inputs, vx_enum type,
vx_size normalization_size,
vx_float32 alpha,
vx_float32 beta,
vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Local Response Normalization Layer Node. This function is optional for 8-bit extension with the extension string 'KHR_NN_8'.
* \details Normalizing over local input regions. Each input value is divided by \f$ (\bias+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$ , where n is the number of elements to normalize across.
* and the sum is taken over a rectangle region centred at that value (zero padding is added where necessary).
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8 KHR_NN_16'.
* Since this function is optional for 'KHR_NN_8', so implementations only must support <tt>VX_TYPE_INT16</tt> with fixed_point_position 8.
* \param [in] type [static] Either same map or across maps (see <tt>\ref vx_nn_norm_type_e</tt>).
@ -882,7 +789,7 @@ typedef struct _vx_nn_stride_slice_params_t
 * and the sum is taken over the region centred at that value (zero padding is added where necessary).
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* \param [in] nomalization_params [static] Pointer to <tt>\ref vx_nn_normalization_params_t </tt> parameter structure.
* \param [in] size_of_normalization_param [static] The size of the parameter structure.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
@ -908,7 +815,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxNormalizationLayer2(vx_graph graph, vx_tensor
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
*/
VX_API_ENTRY vx_node VX_API_CALL vxActivationLayer(vx_graph graph, vx_tensor inputs, vx_enum function, vx_float32 a, vx_float32 b, vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network ROI pooling node
* \details Pooling is done on the width and height dimensions of the <tt>\ref vx_tensor</tt>. The ROI Pooling get an array of roi rectangles, and an input tensor.
@ -916,9 +823,9 @@ VX_API_ENTRY vx_node VX_API_CALL vxActivationLayer(vx_graph graph, vx_tensor inp
* The down scale method is determined by the pool_type.
* Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0)
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0)
* \param [in] inputs_rois The roi array tensor. ROI array with dimensions [4, roi_count, #batches] where the first dimension represents 4 coordinates of the top left and bottom right corners of the roi rectangles, based on the input tensor width and height.
* #batches is optional and must be the same as in inputs. roi_count is the number of ROI rectangles. (Kernel parameter #1)
* \param [in] pool_type [static] Of type <tt>\ref vx_nn_pooling_type_e</tt>. Only <tt>\ref VX_NN_POOLING_MAX</tt> pooling is supported. (Kernel parameter #2)
@ -930,13 +837,13 @@ VX_API_ENTRY vx_node VX_API_CALL vxActivationLayer(vx_graph graph, vx_tensor inp
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
*/
VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer(vx_graph graph, vx_tensor input_data, vx_tensor input_rois, const vx_nn_roi_pool_params_t *roi_pool_params, vx_size size_of_roi_params, vx_tensor output_arr);
/*! \brief [Graph] Creates a Convolutional Network Deconvolution Layer Node.
* \details Deconvolution denote a sort of reverse convolution, which importantly and confusingly is not actually a proper mathematical deconvolution.
* Convolutional Network Deconvolution is up-sampling of an image by learned Deconvolution coefficients.
* The operation is similar to convolution but can be implemented by up-sampling the inputs with zeros insertions between the inputs,
* and convolving the Deconvolution kernels on the up-sampled result.
* and convolving the Deconvolution kernels on the up-sampled result.
* For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined,
* and should be at least 16.\n
* round: rounding according the <tt>vx_round_policy_e</tt> enumeration. \n
@ -950,7 +857,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer(vx_graph graph, vx_tensor inp
* The relation between input to output is as follows: \n
* \f$ width_{output} = (width_{input} -1) * upscale_x - 2 * padding_x + kernel_x + a_x \f$\n
* and \n
* \f$ height_{output} = (height_{input} - 1) * upscale_y - 2 * padding_y + kernel_y + a_y \f$\n
* \f$ height_{output} = (height_{input} - 1) * upscale_y - 2 * padding_y + kernel_y + a_y \f$\n
* where \f$width_{input}\f$ is the size of the input width dimension. \f$height_{input}\f$ is the size of the input height dimension.
* \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension.
* \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height. \f$a_x\f$ and \f$a_y\f$ are user-specified quantity used to distinguish between the \f$upscale_x\f$ and \f$upscale_y\f$ different possible output sizes.
@ -964,9 +871,9 @@ VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer(vx_graph graph, vx_tensor inp
* Notice that this node creation function has more parameters than the corresponding kernel. Numbering of kernel parameters (required if you create this node using the generic interface) is explicitly specified here.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* Implementations must support input tensor data types indicated by the extension strings 'KHR_NN_8' or 'KHR_NN_8 KHR_NN_16'. (Kernel parameter #0)
* \param [in] weights [static] The 4d weights with dimensions [width, height, #IFM, #OFM]. See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>. (Kernel parameter #1)
* \param [in] weights [static] The 4d weights with dimensions [width, height, #IFM, #OFM]. See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>. (Kernel parameter #1)
* \param [in] biases [static] Optional, ignored if NULL. The biases have one dimension [#OFM]. Implementations must support input tensor data type same as the inputs. (Kernel parameter #2)
* \param [in] deconvolution_params [static] Pointer to parameters of type <tt>\ref vx_nn_deconvolution_params_t</tt> (Kernel parameter #3)
* \param [in] size_of_deconv_params [static] Size in bytes of deconvolution_params. Note that this parameter is not counted as one of the kernel parameters.
@ -990,7 +897,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxDeconvolutionLayer(vx_graph graph, vx_tensor
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxLeakyReluLayer(
vx_graph graph,
vx_tensor inputs,
vx_float32 negative_slope,
vx_tensor outputs
@ -1009,7 +916,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxLeakyReluLayer(
* \version 0.5
*/
VX_API_ENTRY vx_node VX_API_CALL vxPReluLayer(
vx_graph graph,
vx_tensor inputs,
vx_tensor alpha,
vx_tensor outputs
@ -1057,14 +964,14 @@ VX_API_ENTRY vx_node VX_API_CALL vxConcat2Layer(
vx_tensor in0,
vx_tensor in1,
vx_tensor out
);
/*! \brief parameter for vxConcatIndefiniteLayer
* \ingroup group_cnn
* \version 0.4
*/
typedef struct _vx_nn_concat_params_t
{
{
vx_uint32 axis; /*!< \brief The axis on which we need do concat. */
} vx_nn_concat_params_t;
@ -1087,29 +994,6 @@ VX_API_ENTRY vx_node VX_API_CALL vxConcatIndefiniteLayer(
vx_tensor out
);
/*! \brief [Graph] Creates a Reorgnization Layer Node.
* \details Reorganize the layer. Picking up pixels from input tensor according to the rule \n
* dimension 1: i * stride + (k / out_c) % stride \n
* dimension 2: j * stride + (k / out_c) / stride \n
* dimension 3: k % out_c \n
* out_c = input_c / (stride * stride), i is in range (0, input_w-1), j is in range (0, input_h-1), k is in range (0, input_c-1)
* Output value is in order sequence.
* \param [in] graph The reference to the parent graph.
* \param [in] inputs The input tensor data to reorg.
* \param [in] stride [static] Delta size of two pixels in each dimensions to do a reorg operation.
* \param [out] outputs The output tensor data. Output will have different number of each dimensions as input.
* \returns <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxReorgLayer(
vx_graph graph,
vx_tensor inputs,
vx_uint32 stride,
vx_tensor outputs
);
/*! \brief The type list of reorgnization.
* \ingroup group_cnn
* \version 0.4
@ -1132,7 +1016,7 @@ enum vx_reorg_type_e
VX_REORG_SHUFFLE_CHANNEL,
};
/*! \brief Input parameter for reorg layer
*\ingroup group_cnn
*\version 0.4
*/
@ -1155,7 +1039,7 @@ typedef struct _vx_nn_reorg_params_ext_t
typedef struct _vx_nn_reorg_params_ext2_t
{
    vx_nn_reorg_params_t base;       /*!< \brief vx_nn_reorg_params <tt>\ref vx_nn_reorg_params_t</tt> */
    vx_int32 *num_group;             /*!< \brief NOTE(review): presumably the number of groups — confirm against driver usage. */
    vx_int32 *axis;                  /*!< \brief NOTE(review): presumably the axis the reorg operates on — confirm against driver usage. */
} vx_nn_reorg_params_ext2_t;
@ -1172,7 +1056,7 @@ typedef struct _vx_nn_reorg_params_ext2_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxReorgLayer2(
vx_graph graph,
vx_tensor input,
const vx_nn_reorg_params reorg_params,
vx_size size_of_reorg_params,
@ -1201,7 +1085,7 @@ typedef struct _vx_nn_rounding_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxTensorRoundingNode(
vx_graph graph,
vx_tensor input,
const vx_nn_rounding_params rounding_params,
vx_size size_of_rounding_params,
@ -1236,7 +1120,7 @@ typedef struct _vx_nn_hashlut_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxHashTableLookupLayer(
vx_graph graph,
vx_tensor input,
const vx_nn_hashlut_params hashlut_params,
vx_size size_of_hashlut_params,
@ -1282,7 +1166,7 @@ typedef struct _vx_nn_lshproj_params_t
* \param [in] lshproj_params Pointer to parameters of type <tt>\ref vx_nn_lshproj_params</tt>
* \param [in] size_of_lshproj_params [static] Size in bytes of vx_nn_lshproj_params.
* \param [out] output The output tensor data.
* If the projection type is sparse:
* Output.Dim == { Tensor[0].Dim[0] }
* A tensor that represents hash signatures.
* If the projection type is Dense:
@ -1295,7 +1179,7 @@ typedef struct _vx_nn_lshproj_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxLSHProjectionLayer(
vx_graph graph,
vx_tensor input,
const vx_nn_lshproj_params lshproj_params,
vx_size size_of_lshproj_params,
@ -1308,7 +1192,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxLSHProjectionLayer(
*/
typedef struct _vx_nn_reshape_params_t
{
    vx_tensor dims;                  /*!< \brief dimension. */
} vx_nn_reshape_params_t, * vx_nn_reshape_params;
/*! \brief [Graph] Creates a Reshape Layer Node.
@ -1324,7 +1208,7 @@ typedef struct _vx_nn_reshape_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxTensorReshapeNode(
vx_graph graph,
vx_tensor input,
const vx_nn_reshape_params reshape_params,
vx_size size_of_reshape_params,
@ -1353,7 +1237,7 @@ typedef struct _vx_nn_scale_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxTensorScaleNode(
vx_graph graph,
vx_tensor input,
const vx_nn_scale_params scale_params,
vx_size size_of_scale_params,
@ -1373,6 +1257,11 @@ typedef struct _vx_nn_yuv2rgb_scale_params_t
vx_float32 scale_rgb; /*!< \brief Scale coefficient value for output rgb; Not the scale ratio; */
vx_bool y_only; /*!< \brief YUV mode, Y only or normal YUV. */
vx_bool output_rgb; /*!< \brief Output mode, BGR or RGB. */
vx_bool output_roi; /*!< \brief Output full image or partial region of image. Default is full image. */
vx_uint8 fill_r; /*!< \brief R channel value of output image pad. */
vx_uint8 fill_g; /*!< \brief G channel value of output image pad. */
vx_uint8 fill_b; /*!< \brief B channel value of output image pad. */
vx_rectangle_t output_rect; /*!< \brief The rectangle region of output image. It should be smaller than input image. If output_roi is false, this parameter will be ignored.*/
} vx_nn_yuv2rgb_scale_params_t, * vx_nn_yuv2rgb_scale_params;
/*! \brief [Graph] Creates a scale Layer Node.
@ -1412,7 +1301,7 @@ typedef struct _vx_nn_rnn_params_t
* \details A basic recurrent neural network layer.
* This layer implements the operation:
* outputs = state = activation(inputs * input_weights + state * recurrent_weights + bias)
*
*
* Where:
* "input_weights" is a weight matrix that multiplies the inputs;
* "recurrent_weights" is a weight matrix that multiplies the current
@ -1434,7 +1323,7 @@ typedef struct _vx_nn_rnn_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxRNNLayer(
vx_graph graph,
vx_tensor input,
const vx_nn_rnn_params rnn_params,
vx_size size_of_rnn_params,
@ -1474,7 +1363,7 @@ typedef struct _vx_nn_softmax_params_ext_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer2(
vx_graph graph,
vx_tensor input,
const vx_nn_softmax_params softmax_params,
vx_size size_of_softmax_params,
@ -1500,25 +1389,25 @@ typedef struct _vx_nn_svdf_params_t
* densely connected layer that's processing a sequence of input frames can
* be approximated by using a singular value decomposition of each of its
* nodes. The implementation is based on:
*
*
* https://research.google.com/pubs/archive/43813.pdf
*
*
* P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
* "Compressing Deep Neural Networks using a Rank-Constrained Topology".
* INTERSPEECH, 2015.
*
*
* It processes the incoming input using a 2-stage filtering mechanism:
* stage 1 performs filtering on the "features" dimension, whose outputs get
* pushed into a memory of fixed-size memory_size.
* stage 2 performs filtering on the "time" dimension of the memory_size
* memoized outputs of stage 1.
*
*
* Specifically, for rank 1, this layer implements the operation:
*
*
* memory = push(conv1d(inputs, weights_feature, feature_dim,
* "PADDING_VALID"));
* outputs = activation(memory * weights_time + bias);
*
*
* Where:
* "weights_feature" is a weights matrix that processes the inputs (by
* convolving the input with every "feature filter"), and whose outputs get
@ -1530,7 +1419,7 @@ typedef struct _vx_nn_svdf_params_t
* batch); and
* "activation" is the function passed as the "fused_activation_function"
* argument (if not "NONE").
*
*
* Each rank adds a dimension to the weights matrices by means of stacking
* the filters.
* \param [in] graph The reference to the parent graph.
@ -1548,7 +1437,7 @@ typedef struct _vx_nn_svdf_params_t
* \version 0.4
*/
VX_API_ENTRY vx_node VX_API_CALL vxSVDFLayer(
vx_graph graph,
vx_tensor input,
const vx_nn_svdf_params svdf_params,
vx_size size_of_svdf_params,
@ -1577,7 +1466,7 @@ typedef struct _vx_nn_pooling_params_t
* \version 0.4
*/
typedef struct _vx_nn_pooling_params_ext_t
{
{
vx_nn_pooling_params_t base; /*!< \brief The base definition.<tt>\ref vx_nn_pooling_params_t</tt> */
vx_uint32 stride_x; /*!< \brief Skip x jump for down scale. */
vx_uint32 stride_y; /*!< \brief Skip y jump for down scale. */
@ -1593,7 +1482,7 @@ typedef struct _vx_nn_pooling_params_ext_t
* The down scale is done by picking the results according to a skip jump. The skip in the x and y dimension is determined by the output size dimensions.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>
* \param [in] pooling_params [static] Pointer to parameters of type <tt>\ref vx_nn_pooling_params_t</tt>
* \param [in] size_of_pooling_params [static] Size in bytes of pooling_params.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
@ -1611,7 +1500,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer2(
/*! \brief [Graph] Performs arithmetic addition on element values in the input tensor data's.
* \param [in] graph The handle to the graph.
* \param [in] in1 input tensor data,.
* \param [in] in1 input tensor data,.
* \param [in] in2 input tensor data, inputs must be of equal in dimensions.
* else, If in one of the vx_mddata dimension is 1.
* That dimension is considered as a const on all the dimension terms.
@ -1745,22 +1634,10 @@ typedef struct _vx_nn_l2norm_params_t
vx_int32 axis;
} vx_nn_l2norm_params_t;
/*! \brief [Graph] Creates a Convolutional Network L2Normalize Layer Node.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network L2Normalize Layer2 Node.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* \param [in] l2norm_params [static] Pointer to parameters of type <tt>\ref vx_nn_l2norm_params</tt>
* \param [in] size_of_l2norm_params [static] Size in bytes of vx_nn_l2norm_params.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
@ -1770,9 +1647,9 @@ VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer(vx_graph graph, vx_tensor in
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer2(
vx_graph graph,
vx_tensor inputs,
const vx_nn_l2norm_params_t * l2norm_params,
vx_graph graph,
vx_tensor inputs,
const vx_nn_l2norm_params_t * l2norm_params,
vx_size size_of_l2norm_params,
vx_tensor outputs);
@ -1806,7 +1683,7 @@ typedef struct _vx_nn_rpn_params_t
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxRPNLayer(
vx_graph graph,
vx_graph graph,
vx_tensor score,
vx_tensor bbox,
vx_tensor anchors,
@ -1827,24 +1704,24 @@ typedef struct _vx_nn_lstm_params_t
vx_tensor input2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
vx_tensor input2cell_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
vx_tensor input2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, input_size].*/
vx_tensor recurrent2input_weight; /*!< \brief Optional A 2-D tensor of type T, of shape [num_units, output_size]. where "output_size" corresponds to either the number of cell units (i.e., "num_units"), or the second dimension of the "projection_weights", if defined.*/
vx_tensor recurrent2forget_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
vx_tensor recurrent2cell_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
vx_tensor recurrent2output_weight; /*!< \brief A 2-D tensor of type T, of shape [num_units, output_size].*/
vx_tensor cell2input_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/
vx_tensor cell2forget_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/
vx_tensor cell2output_weight; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/
vx_tensor input_gate_bias; /*!< \brief Optional A 1-D tensor of type T, of shape [num_units].*/
vx_tensor forget_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
vx_tensor cell_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
vx_tensor output_gate_bias; /*!< \brief A 1-D tensor of type T, of shape [num_units].*/
vx_tensor projection_weight; /*!< \brief Optional A 2-D tensor of type T, of shape [output_size, num_units].*/
vx_tensor projection_bias; /*!< \brief Optional A 1-D tensor of type T, of shape [output_size].*/
vx_tensor activation; /*!< \brief Optional. An ActivationFunctionType indicating the activation function. If "NONE" is specified then it results in a linear activation.If "NONE" is specified then it results in a linear activation.*/
vx_tensor cell_clip; /*!< \brief A clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled.*/
vx_tensor proj_clip; /*!< \brief A clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.*/
@ -1859,9 +1736,9 @@ typedef struct _vx_nn_lstm_params_ext_t
vx_tensor forget_bias; /*!< \brief A bias(float 32) for the forget gate. If set to 0.0f(by default) then bias is ignored.*/
vx_float32 norm_gain; /*!< \brief Float32[static] The layer normalization gain initial value(default is 1.0f).*/
vx_float32 norm_shift; /*!< \brief Float32[static] The layer normalization shift initial value(default is 0.0f).*/
vx_float32 norm_shift; /*!< \brief Float32[static] The layer normalization shift initial value(default is 0.0f).*/
vx_tensor sequence_length; /*!< \brief Optional[static] Specifies the length of each sequence in inputs. An `int32` (tensor) size `[batch_size]`, values in `[0, time_len)` or None(by default).*/
vx_tensor sequence_length; /*!< \brief Optional[static] Specifies the length of each sequence in inputs. An `int32` (tensor) size `[batch_size]`, values in `[0, time_len)` or None(by default).*/
/*Since ANDROID NN API level 29 there are additional inputs to this op:*/
vx_tensor layernorm2input_weight; /*!< \brief [Optional] The input layer normalization weights. A 1 - D tensor of shape[num_units].Used to rescale normalized inputs to activation at input gate.*/
@ -1900,11 +1777,11 @@ typedef struct _vx_nn_lstm_layer_params_ext_t
* Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
* recurrent neural network architectures for large scale acoustic modeling."
* INTERSPEECH, 2014.
*
*
* The coupling of input and forget gate (CIFG) is based on:
* http://arxiv.org/pdf/1503.04069.pdf
* Greff et al. "LSTM: A Search Space Odyssey"
*
*
* The class has the following independently optional inputs:
* * If input gate (if CIFG): "input_to_forget_weights",
* "recurrent_to_input_weights", "cell_to_input_weights", "input_gate_bias".
@ -1924,7 +1801,7 @@ typedef struct _vx_nn_lstm_layer_params_ext_t
* \param [out] scratch A 3-D tensor of type T, of shape [num_cell, 4, batch_size].
* \param [out] output_state_out A 2-D tensor of type T, of shape [output_size, batch_size].
* \param [out] cell_state_out A 2-D tensor of type T, of shape [num_units, batch_size].
* \param [out] output A 2-D tensor of type T, of shape [output_size, batch_size].
* \param [out] output A 2-D tensor of type T, of shape [output_size, batch_size].
* This is effectively the same as the current "output_state" value.
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
@ -1959,7 +1836,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxLstmUnitLayer(
* is the batching dimension.
* \param [in] lstm_layer_params LSTM paraments <tt>\ref vx_nn_lstm_layer_params_t </tt>.
* \param [in] size_of_lstm_layer_params [static] The size of the lstm_layer_params.
* \param [out] output A 2-D/3D tensor of type T, of shape [output_size, batch_size] or [output_size, batch_size, time].
* \param [out] output A 2-D/3D tensor of type T, of shape [output_size, batch_size] or [output_size, batch_size, time].
* This is effectively the same as the current "output_state" value.
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
@ -1968,7 +1845,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxLstmUnitLayer(
* \version 0.3
*/
VX_API_ENTRY vx_node VX_API_CALL vxLstmLayer(
vx_graph graph,
vx_graph graph,
vx_tensor input,
vx_tensor static_input,
vx_tensor cont,
@ -2029,7 +1906,7 @@ VX_API_ENTRY vx_node VX_API_CALL vxTensorMeanNode(
* \param [in] input A n-D tensor, specifying the tensor to be squeezed.
* \param [in] squeeze_params paraments <tt>\ref vx_nn_squeeze_params_t </tt>.
* \param [in] size_of_squeeze_param [static] The size of the vx_nn_squeeze_params_t.
* \param [out] output A n-D tensor of the same type as input. Contains the same data as input,
* \param [out] output A n-D tensor of the same type as input. Contains the same data as input,
* but has one or more dimensions of size 1 removed.
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
@ -2077,6 +1954,15 @@ typedef struct _vx_hardware_caps_params_t
vx_bool evis2; /*!< \brief evs2 If true, hardware support evis2.*/
} vx_hardware_caps_params_t;
/*! \brief Input parameters for query hardware caps.
* \ingroup group_context
*/
typedef struct _vx_hardware_caps_params_ext_t
{
vx_hardware_caps_params_t base;
vx_uint32 subGroupSize; /*!< \brief shader sub-group size.*/
} vx_hardware_caps_params_ext_t;
/*! \brief Queries hardware caps information.
* \param [in] context The reference to the context.
* \param [in] hardware_caps_params <tt>\ref vx_hardware_caps_params_t </tt>.

View File

@ -207,6 +207,17 @@ typedef struct _vx_nn_convolution_relu_pooling_params_ext3_t
vx_enum* interDataType;
} vx_nn_convolution_relu_pooling_params_ext3_t, * vx_nn_convolution_relu_pooling_params_ext3;
typedef struct _vx_nn_convolution_relu_pooling_params_ext4_t
{
vx_nn_convolution_relu_pooling_params_ext3_t ext3; /*!< \brief convolution relu pooling params <tt>\ref vx_nn_convolution_relu_pooling_params__ext_t</tt> */
vx_uint32 poolingStrideX;
vx_uint32 poolingStrideY;
vx_uint32 poolingPadLeft;
vx_uint32 poolingPadRight;
vx_uint32 poolingPadTop;
vx_uint32 poolingPadBottom;
} vx_nn_convolution_relu_pooling_params_ext4_t, * vx_nn_convolution_relu_pooling_params_ext4;
/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling Layer Node, this fucntion match kronos NN Extension 1.2 verion.
* \details This function implement Convolutional Network Convolution and Activation(Relu) and Pooling layer.
* For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined,
@ -650,6 +661,242 @@ VX_API_ENTRY vx_node VX_API_CALL vxConvLSTMLayer(
vx_tensor output
);
/*! \brief [Graph] Creates a Convolutional Network Pooling Layer Node.
* \details Pooling is done on the first 2 dimensions or the <tt>\ref vx_tensor</tt>. Therefore, we use here the term x for the first dimension and y for the second.\n
* Pooling operation is a function operation over a rectangle size and then a nearest neighbour down scale.
* Here we use pool_size_x and pool_size_y to specify the rectangle size on which the operation
* is performed. \n
* before the operation is done (average or maximum value). the data is padded in the first 2D with zeros.
* The down scale is done by picking the results according to a skip jump. The skip in the x and y dimension is determined by the output size dimensions.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>
* \param [in] pool_type [static] Either max pooling or average pooling (see <tt>\ref vx_convolutional_network_pooling_type_e</tt>).
* \param [in] pool_size_x [static] Size of the pooling region in the x dimension
* \param [in] pool_size_y [static] Size of the pooling region in the y dimension.
* \param [in] pool_pad_x [static] Padding size in the x dimension.
* \param [in] pool_pad_y [static] Padding size in the y dimension.
* \param [in] rounding [static] The rounding method for calculating output dimensions. See <tt>\ref vx_convolutional_network_rounding_type_e</tt>
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxPoolingLayer(vx_graph graph, vx_tensor inputs, vx_enum pooling_type,
vx_size pooling_size_x,
vx_size pooling_size_y,
vx_size pooling_padding_x,
vx_size pooling_padding_y,
vx_enum rounding,
vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Softmax Layer Node.
* \details the softmax function, is a generalization of the logistic function that "squashes" a K-dimensional vector \f$ z \f$ of arbitrary real values to a K-dimensional vector
* \f$ \sigma(z) \f$ of real values in the range (0, 1) that add up to 1. The function is given by:
* \f$ \sigma(z) = \frac{\exp^z}{\sum_i \exp^{z_i}} \f$
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor, with the number of dimensions according to the following scheme.
* In case IFM dimension is 1. Softmax is be calculated on that dimension.
* In case IFM dimension is 2. Softmax is be calculated on the first dimension. The second dimension is batching.
* In case IFM dimension is 3. Dimensions are [Width, Height, Classes]. And Softmax is calculated on the third dimension.
* In case IFM dimension is 4. Dimensions are [Width, Height, Classes, batching]. Softmax is calculated on the third dimension.
* Regarding the layout specification, see <tt>\ref vxCreateTensor</tt> and <tt>\ref vxCreateVirtualTensor</tt>.
* \param [out] outputs The output tensor. Output will have the same number of dimensions as input. Output tensor data type must be same as the inputs.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
*/
VX_API_ENTRY vx_node VX_API_CALL vxSoftmaxLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs);
/* vxCopyTensorPatchForNN11 is for back compatibility with spec 1.1, which is used in nn*/
VX_API_ENTRY vx_status VX_API_CALL vxCopyTensorPatchForNN11(
vx_tensor tensor,
vx_tensor_view view,
vx_tensor_addressing user_addr,
void *user_ptr,
vx_enum usage,
vx_enum user_mem_type
);
/* vxCreateTensorForNN11 is for back compatibility with spec 1.1, which is used in nn*/
VX_API_ENTRY vx_tensor VX_API_CALL
vxCreateTensorForNN11(
vx_context context,
vx_uint32 num_of_dims,
vx_uint32 *sizes,
vx_enum data_format,
vx_int8 fixed_point_pos
);
/*! \brief [Graph] Creates a Convolutional Network Normalization Layer Node.
* \details Normalizing over local input regions. Each input value is divided by \f$ (1+\frac{\alpha}{n}\sum_i x^2_i)^\beta \f$ , where n is the number of elements to normalize across.
* and the sum is taken over the region centred at that value (zero padding is added where necessary).
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor data. 3 lower dimensions represent a single input, 4th dimension for batch of inputs is optional.Dimension layout is [width, height, IFM, #batches].
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* \param [in] type [static] Either same map or across maps (see vx_convolutional_network_norm_type_e).
* \param [in] norm_size [static] Number of elements to normalize across.
* \param [in] alpha [static] Alpha parameter in the normalization equation.
* \param [in] beta [static ] Beta parameter in the normalization equation.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxNormalizationLayer(vx_graph graph, vx_tensor inputs, vx_enum type,
vx_size normalization_size,
vx_float32 alpha,
vx_float32 beta,
vx_tensor outputs);
/*! \brief [Graph] Creates a Reorgnization Layer Node.
* \details Reorganize the layer. Picking up pixels from input tensor according to the rule \n
* dimension 1: i * stride + (k / out_c) % stride \n
* dimension 2: j * stride + (k / out_c) / stride \n
* dimension 3: k % out_c \n
* out_c = input_c / (stride * stride), i is in range (0, input_w-1), j is in range (0, input_h-1), k is in range (0, input_c-1)
* Output value is in order sequence.
* \param [in] graph The reference to the parent graph.
* \param [in] inputs The input tensor data to reorg.
* \param [in] stride [static] Delta size of two pixels in each dimensions to do a reorg operation.
* \param [out] outputs The output tensor data. Output will have different number of each dimensions as input.
* \returns <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxReorgLayer(
vx_graph graph,
vx_tensor inputs,
vx_uint32 stride,
vx_tensor outputs
);
/*! \brief [Graph] Creates a Convolutional Network L2Normalize Layer Node.
* \param [in] graph The handle to the graph.
* \param [in] inputs The input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Dimension layout is [width, height, #IFM, #batches].
* See <tt>\ref vxCreateTensor2</tt> and <tt>\ref vxCreateVirtualTensor2</tt>.
* \param [out] outputs The output tensor data. Output will have the same number of dimensions as input.
* \ingroup group_cnn
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxL2NormalizeLayer(vx_graph graph, vx_tensor inputs, vx_tensor outputs);
/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling and Add Layer Node.
* \details This function implement Convolutional Network Convolution and Activation(Relu) and Pooling and Add layer.
* For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined,
* and should be at least 16.\n
* round: rounding according the <tt>vx_round_policy_e</tt> enumeration. \n
* saturate: A saturation according the <tt>vx_convert_policy_e</tt> enumeration.
* The following equation is implemented: \n
* \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j-m,k-n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n
* Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output.
* \f$ j,k \f$ are the inputs/outputs spatial indexes.
* Convolution is done on the width and height dimensions of the <tt>\ref vx_tensor</tt>. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n
* before the Convolution is done, a padding with zeros of the width and height input dimensions is performed.
* Then down scale is done by picking the results according to a skip jump. The skip in the x and y is determined by the output size dimensions.
* The relation between input to output is as follows: \n
* \f$ width_{output} = round(\frac{(width_{input} + paddingleft_x + paddingright_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n
* and \n
* \f$ height_{output} = round(\frac{(height + paddingtop_y + paddingbottom_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n
* where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension.
* \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension.
* \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions.
* skip is calculated by the relation between input and output.
* rounding is done according to <tt>\ref vx_convolutional_network_rounding_type_e</tt>.
* \param [in] graph The handle to the graph.
* \param [in] inputs_conv The input tensor data for convolution. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
* \param [in] inputs_add The input tensor data for add. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
* The dimension order is [width, height, #IFM, #batches]. \n
* \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference.
* \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type <tt>\ref vx_nn_convolution_relu_pooling_params_t</tt>
* \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params.
* \param [out] outputs_conv The convolution output tensor data. Output will have the same number and structure of dimensions as inputs_conv.
* \param [out] outputs_add The final add output tensor data. Output will have the same number and structure of dimensions as input.
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingAddLayer2(
vx_graph graph,
vx_tensor inputs_conv,
vx_tensor inputs_add,
vx_weights_biases_parameter weights_biases,
const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params,
vx_size size_of_convolution_relu_pooling_params,
vx_tensor outputs_conv,
vx_tensor outputs_add);
/*! \brief [Graph] Creates a Convolutional Network Convolution and Activation(Relu) and Pooling and Multiply Layer Node.
* \details This function implement Convolutional Network Convolution and Activation(Relu) and Pooling and Multiply layer.
* For fixed-point data types, a fixed point calculation is performed with round and saturate according to the number of accumulator bits. The number of the accumulator bits are implementation defined,
* and should be at least 16.\n
* round: rounding according the <tt>vx_round_policy_e</tt> enumeration. \n
* saturate: A saturation according the <tt>vx_convert_policy_e</tt> enumeration.
* The following equation is implemented: \n
* \f$ outputs[j,k,i] = saturate(round(\sum_{l} (\sum_{m,n} inputs[j-m,k-n,l] \times weights[m,n,l,i])+biasses[j,k,i])) \f$\n
* Where \f$m,n\f$ are indexes on the convolution matrices. \f$ l\f$ is an index on all the convolutions per input.\f$ i\f$ is an index per output.
* \f$ j,k \f$ are the inputs/outputs spatial indexes.
* Convolution is done on the width and height dimensions of the <tt>\ref vx_tensor</tt>. Therefore, we use here the term x for index along the width dimension and y for index along the height dimension.\n
* before the Convolution is done, a padding with zeros of the width and height input dimensions is performed.
* Then down scale is done by picking the results according to a skip jump. The skip in the x and y is determined by the output size dimensions.
* The relation between input to output is as follows: \n
* \f$ width_{output} = round(\frac{(width_{input} + paddingleft_x + paddingright_x - kernel_x - (kernel_x -1) * dilation_x)}{skip_x} + 1) \f$\n
* and \n
* \f$ height_{output} = round(\frac{(height + paddingtop_y + paddingbottom_y - kernel_y - (kernel_y -1) * dilation_y)}{skip_y} + 1) \f$\n
* where \f$width\f$ is the size of the input width dimension. \f$height\f$ is the size of the input height dimension.
* \f$width_{output}\f$ is the size of the output width dimension. \f$height_{output}\f$ is the size of the output height dimension.
* \f$kernel_x\f$ and \f$kernel_y\f$ are the convolution sizes in width and height dimensions.
* skip is calculated by the relation between input and output.
* rounding is done according to <tt>\ref vx_convolutional_network_rounding_type_e</tt>.
* \param [in] graph The handle to the graph.
* \param [in] inputs_conv The input tensor data for convolution. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
* \param [in] inputs_mul The input tensor data for mul. 3 lower dimensions represent a single input, all following dimensions represent number of batches, possibly nested.
* The dimension order is [width, height, #IFM, #batches]. \n
* \param [in] scale A non-negative <tt>\ref VX_TYPE_FLOAT32</tt> multiplied to each product before overflow handling.
* \param [in] weights_biases [static] Point to WeightBiasesParameter data, vx_weights_biases_parameter is an opaque reference.
* \param [in] convolution_relu_pooling_params [static] Pointer to parameters of type <tt>\ref vx_nn_convolution_relu_pooling_params_t</tt>
* \param [in] size_of_convolution_relu_pooling_params [static] Size in bytes of convolution_relu_pooling_params.
* \param [out] outputs_conv The convolution output tensor data. Output will have the same number and structure of dimensions as inputs_conv.
* \param [out] outputs_mul The final mul output tensor data. Output will have the same number and structure of dimensions as input.
* \return <tt> vx_node</tt>.
* \returns A node reference <tt>\ref vx_node</tt>. Any possible errors preventing a
* successful creation should be checked using <tt>\ref vxGetStatus</tt>.
* \ingroup group_cnn
*/
VX_API_ENTRY vx_node VX_API_CALL vxConvolutionReluPoolingMultiplyLayer2(
vx_graph graph,
vx_tensor inputs_conv,
vx_tensor inputs_mul,
vx_float32 input_scale,
vx_weights_biases_parameter weights_biases,
const vx_nn_convolution_relu_pooling_params_t * convolution_relu_pooling_params,
vx_size size_of_convolution_relu_pooling_params,
vx_tensor outputs_conv,
vx_tensor outputs_mul);
/*! \brief [Graph] Performs LUT on element values in the input tensor data's.
* \param [in] graph The handle to the graph.
* \param [in] input input tensor data.
* \param [in] InLut The look-up table of x value, of type <tt>\ref vx_lut</tt>.
* \param [in] OutLut The look-up table of y value, of type <tt>\ref vx_lut</tt>.
* \param [out] output The output tensor data with the same dimensions as the input tensor data's.
* \ingroup group_tensor
* \return <tt> vx_node</tt>.
* \retval 0 Node could not be created.
* \retval * Node handle.
*/
VX_API_ENTRY vx_node VX_API_CALL vxTensorTableLookupLayer(
vx_graph graph,
vx_tensor input,
vx_lut InLut,
vx_lut OutLut,
vx_tensor output);
#ifdef __cplusplus
}
#endif

View File

@ -290,6 +290,20 @@ typedef struct _vx_object_array *vx_object_array;
*/
typedef struct _vx_tensor_t * vx_tensor;
/*! \brief The multi dimensional view data structure.
* \details Used to split tensors into several views. Or concatenate several view into one tensor.
* \see vxCreateTensorFromView
* \ingroup group_tensor
*/
typedef struct _vx_tensor_view_t * vx_tensor_view;
/*! \brief The addressing of a tensor view patch structure is used by the Host only
* to address elements in a tensor view patch.
* \see <tt>\ref vxCopyTensorPatch</tt>
* \ingroup group_tensor
*/
typedef struct _vx_tensor_addressing_t * vx_tensor_addressing;
/*! \brief The weight bias parameter for fused layers
* \ingroup group_cnn
*/
@ -962,6 +976,9 @@ enum vx_node_attribute_e {
* Use a <tt>\ref vx_bool</tt> parameter.
*/
VX_NODE_VALID_RECT_RESET = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x8,
VX_NODE_ATTRIBUTE_CONST_TENSOR_CACHE = VX_ATTRIBUTE_BASE(VX_ID_KHRONOS, VX_TYPE_NODE) + 0x9,
};
/*! \brief The parameter attributes list
@ -1288,6 +1305,8 @@ enum vx_memory_type_e {
VX_MEMORY_TYPE_INTERNAL = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x1,
VX_MEMORY_TYPE_HOST_UNCACHED = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x2,
VX_MEMORY_TYPE_HOST_PHYSICAL = VX_ENUM_BASE(VX_ID_VIVANTE, VX_ENUM_MEMORY_TYPE) + 0x3,
};
/*! \brief The image reconstruction filters supported by image resampling operations.
@ -1390,8 +1409,6 @@ enum vx_parameter_state_e {
* to deference optional parameters until it is certain they are valid.
*/
VX_PARAMETER_STATE_OPTIONAL = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PARAMETER_STATE) + 0x1,
VX_NODE_ATTRIBUTE_WEIGHT_BIAS_CACHE = VX_ENUM_BASE(VX_ID_KHRONOS, VX_ENUM_PARAMETER_STATE) + 0x2,
};
/*! \brief The border mode list.
@ -1912,4 +1929,12 @@ enum vx_map_flag_e {
VX_NOGAP_X = 1, /*!< \brief No Gap. */
};
enum vx_const_tensor_cache_mode
{
VX_PRELOAD_NULL = 0,
VX_PRELOAD_CONST_TENSOR_VIPSRAM = 1,
VX_PRELOAD_CONST_TENSOR_AXISRAM = 2,
VX_PRELOAD_TYPE_COUNT
};
#endif